diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_backend.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_backend.py
index 780245bc2af..0bd2b7ba14d 100644
--- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_backend.py
+++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_backend.py
@@ -13,9 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
#
-import time
import mock
+import os
+import paramiko
+import time
+
from oslo_concurrency import processutils as putils
from oslo_config import cfg
@@ -23,41 +26,37 @@ from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume.drivers.hitachi import hnas_backend
-from cinder.volume.drivers.hitachi import hnas_nfs as nfs
CONF = cfg.CONF
-HNAS_RESULT1 = "\n\
+evsfs_list = "\n\
FS ID FS Label FS Permanent ID EVS ID EVS Label\n\
----- ----------- ------------------ ------ ---------\n\
- 1026 gold 0xaadee0e035cfc0b7 1 EVSTest1\n\
- 1025 fs01-husvm 0xaada5dff78668800 1 EVSTest1\n\
- 1027 large-files 0xaadee0ef012a0d54 1 EVSTest1\n\
- 1028 platinun 0xaadee1ea49d1a32c 1 EVSTest1\n\
- 1029 test_hdp 0xaadee09634acfcac 1 EVSTest1\n\
- 1030 cinder1 0xaadfcf742fba644e 1 EVSTest1\n\
- 1031 cinder2 0xaadfcf7e0769a6bc 1 EVSTest1\n\
- 1024 fs02-husvm 0xaac8715e2e9406cd 2 EVSTest2\n\
+ 1026 gold 0xaadee0e035cfc0b7 1 EVS-Manila\n\
+ 1029 test_hdp 0xaadee09634acfcac 1 EVS-Manila\n\
+ 1030 fs-cinder 0xaadfcf742fba644e 2 EVS-Cinder\n\
+ 1031 cinder2 0xaadfcf7e0769a6bc 3 EVS-Test\n\
+ 1024 fs02-husvm 0xaac8715e2e9406cd 3 EVS-Test\n\
\n"
-HNAS_RESULT2 = "cluster MAC: 83-68-96-AA-DA-5D"
+cluster_getmac = "cluster MAC: 83-68-96-AA-DA-5D"
-HNAS_RESULT3 = "\n\
-Model: HNAS 4040 \n\
-Software: 11.2.3319.14 (built 2013-09-19 12:34:24+01:00) \n\
-Hardware: NAS Platform (M2SEKW1339109) \n\
+version = "\n\
+Model: HNAS 4040 \n\n\
+Software: 11.2.3319.14 (built 2013-09-19 12:34:24+01:00) \n\n\
+Hardware: NAS Platform (M2SEKW1339109) \n\n\
board MMB1 \n\
-mmb 11.2.3319.14 release (2013-09-19 12:34:24+01:00)\n\
+mmb 11.2.3319.14 release (2013-09-19 12:34:24+01:00)\n\n\
board MFB1 \n\
mfb1hw MB v0883 WL v002F TD v002F FD v002F TC v0059 \
RY v0059 TY v0059 IC v0059 WF v00E2 FS v00E2 OS v00E2 \
WD v00E2 DI v001A FC v0002 \n\
-Serial no B1339745 (Thu Jan 1 00:00:50 2009) \n\
+Serial no B1339745 (Thu Jan 1 00:00:50 2009) \n\n\
board MCP \n\
Serial no B1339109 (Thu Jan 1 00:00:49 2009) \n\
\n"
-HNAS_RESULT4 = "\n\
+evsipaddr = "\n\
EVS Type Label IP Address Mask Port \n\
---------- --------------- ------------------ --------------- ------\n\
admin hnas4040 192.0.2.2 255.255.255.0 eth1 \n\
@@ -67,60 +66,19 @@ evs 1 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\
evs 2 EVSTest2 172.24.44.21 255.255.255.0 ag1 \n\
\n"
-HNAS_RESULT5 = "\n\
- ID Label EVS Size Used Snapshots Deduped\
- Avail Thin ThinSize ThinAvail \
- FS Type \n\
----- ----------- --- ------- ------------- --------- -------\
-- ------------- ---- -------- --------- ---------------------\
-------------- \n\
-1025 fs01-husvm 1 250 GB 21.4 GB (9%) 0 B (0%) NA \
- 228 GB (91%) No 32 KB,\
- WFS-2,128 DSBs\n\
-1026 gold 1 19.9 GB 2.30 GB (12% NA 0 B (0%)\
- 17.6 GB (88%) No 4 KB,WFS-2,128 DSBs,\
- dedupe enabled\n\
-1027 large-files 1 19.8 GB 2.43 GB (12%) 0 B (0%) NA \
- 17.3 GB (88%) No 32 KB,\
- WFS-2,128 DSBs\n\
-1028 platinun 1 19.9 GB 2.30 GB (12%) NA 0 B (0%)\
- 17.6 GB (88%) No 4 KB,WFS-2,128 DSBs,\
- dedupe enabled\n\
-1029 silver 1 19.9 GB 3.19 GB (16%) 0 B (0%) NA \
- 6.7 GB (84%) No 4 KB,\
- WFS-2,128 DSBs\n\
-1030 cinder1 1 40.8 GB 2.24 GB (5%) 0 B (0%) NA \
- 38.5 GB (95%) No 4 KB,\
- WFS-2,128 DSBs\n\
-1031 cinder2 1 39.8 GB 2.23 GB (6%) 0 B (0%) NA \
- 37.6 GB (94%) No 4 KB,\
- WFS-2,128 DSBs\n\
-1024 fs02-husvm 2 49.8 GB 3.54 GB (7%) 0 B (0%) NA \
- 46.2 GB (93%) No 32 KB,\
- WFS-2,128 DSBs\n\
-1032 test 2 3.97 GB 2.12 GB (53%) 0 B (0%) NA \
- 1.85 GB (47%) No 4 KB,\
- WFS-2,128 DSBs\n\
-1058 huge_FS 7 1.50 TB Not determined\n\
-1053 fs-unmounted 4 108 GB Not mounted \
- NA 943 MB (18%) 39.2 GB (36%) No 4 KB,\
- WFS-2,128 DSBs,dedupe enabled\n\
-\n"
-
-HNAS_RESULT6 = "\n\
+df_f = "\n\
ID Label EVS Size Used Snapshots Deduped Avail \
Thin ThinSize ThinAvail FS Type\n\
---- ---------- --- ------ ------------ --------- ------- ------------ \
---- -------- --------- --------------------\n\
-1025 fs01-husvm 1 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \
+1025 fs-cinder 2 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \
No 32 KB,WFS-2,128 DSBs\n\
\n"
-HNAS_RESULT7 = "\n\
-Export configuration: \n\
+nfs_export = "\n\
Export name: /export01-husvm \n\
Export path: /export01-husvm \n\
-File system label: test_hdp \n\
+File system label: fs-cinder \n\
File system size: 250 GB \n\
File system free space: 228 GB \n\
File system state: \n\
@@ -133,79 +91,83 @@ Display snapshots: Yes \n\
Read Caching: Disabled \n\
Disaster recovery setting: \n\
Recovered = No \n\
-Transfer setting = Use file system default \n\
+Transfer setting = Use file system default \n\n\
+Export configuration: \n\
+127.0.0.1 \n\
\n"
-HNAS_RESULT8 = "Logical unit creation started at 2014-12-24 00:38:30+00:00."
-HNAS_RESULT9 = "Logical unit deleted successfully."
-HNAS_RESULT10 = ""
-HNAS_RESULT11 = "Logical unit expansion started at 2014-12-24 01:25:03+00:00."
-
-HNAS_RESULT12 = "\n\
-Alias : test_iqn \n\
-Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-silver \n\
-Comment : \n\
-Secret : test_secret \n\
-Authentication : Enabled \n\
-Logical units : No logical units. \n\
-\n"
-
-HNAS_RESULT13 = "Logical unit added successfully."
-HNAS_RESULT14 = "Logical unit removed successfully."
-HNAS_RESULT15 = "Target created successfully."
-HNAS_RESULT16 = ""
-
-HNAS_RESULT17 = "\n\
-EVS Type Label IP Address Mask Port \n\
----------- --------------- ------------------ --------------- ------\n\
-evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\
-evs 2 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\
-\n"
-
-HNAS_RESULT18 = "Version: 11.1.3225.01\n\
-Directory: /u/u60/_Eng_Axalon_SMU/OfficialBuilds/fish/angel/3225.01/main/bin/\
-x86_64_linux-bart_libc-2.7_release\n\
-Date: Feb 22 2013, 04:10:09\n\
-\n"
-
-HNAS_RESULT19 = " ID Label Size Used Snapshots \
-Deduped Avail Thin ThinSize ThinAvail FS Type\n\
----- ------------- ------- ------------- --------- ------- -------------\
----- -------- --------- -------------------\n\
-1025 fs01-husvm 250 GB 47.1 GB (19%) 0 B (0%) NA 203 GB (81%)\
- No 4 KB,WFS-2,128 DSBs\n\
-1047 manage_test02 19.9 GB 9.29 GB (47%) 0 B (0%) NA 10.6 GB (53%)\
- No 4 KB,WFS-2,128 DSBs\n\
-1058 huge_FS 7 1.50 TB Not determined\n\
-1053 fs-unmounted 4 108 GB Not mounted \
- NA 943 MB (18%) 39.2 GB (36%) No 4 KB,\
- WFS-2,128 DSBs,dedupe enabled\n\
-\n"
-
-HNAS_RESULT20 = "\n\
-Alias : test_iqn \n\
-Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-silver \n\
-Comment : \n\
-Secret : \n\
-Authentication : Enabled \n\
-Logical units : No logical units. \n\
-\n"
-
-HNAS_RESULT20 = "Target does not exist."
-
-HNAS_RESULT21 = "Target created successfully."
-
-HNAS_RESULT22 = "Failed to establish SSC connection"
-
-HNAS_RESULT23 = "\n\
-Alias : cinder-Gold\n\
-Globally unique name: iqn.2015-06.10.10.10.10:evstest1.cinder-gold\n\
-Comment :\n\
-Secret : None\n\
-Authentication : Enabled\n\
-Logical units : No logical units.\n\
-Access configuration :\n\
+iscsi_one_target = "\n\
+Alias : cinder-default \n\
+Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default \n\
+Comment : \n\
+Secret : pxr6U37LZZJBoMc \n\
+Authentication : Enabled \n\
+Logical units : No logical units. \n\
\n\
+ LUN Logical Unit \n\
+ ---- -------------------------------- \n\
+ 0 cinder-lu \n\
+ 1 volume-99da7ae7-1e7f-4d57-8bf... \n\
+\n\
+Access configuration: \n\
+"
+
+df_f_single_evs = "\n\
+ID Label Size Used Snapshots Deduped Avail \
+Thin ThinSize ThinAvail FS Type\n\
+---- ---------- ------ ------------ --------- ------- ------------ \
+---- -------- --------- --------------------\n\
+1025 fs-cinder 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \
+ No 32 KB,WFS-2,128 DSBs\n\
+\n"
+
+nfs_export_tb = "\n\
+Export name: /export01-husvm \n\
+Export path: /export01-husvm \n\
+File system label: fs-cinder \n\
+File system size: 250 TB \n\
+File system free space: 228 TB \n\
+\n"
+
+nfs_export_not_available = "\n\
+Export name: /export01-husvm \n\
+Export path: /export01-husvm \n\
+File system label: fs-cinder \n\
+ *** not available *** \n\
+\n"
+
+evs_list = "\n\
+Node EVS ID Type Label Enabled Status IP Address Port \n\
+---- ------ ------- --------------- ------- ------ ------------------- ---- \n\
+ 1 Cluster hnas4040 Yes Online 192.0.2.200 eth1 \n\
+ 1 0 Admin hnas4040 Yes Online 192.0.2.2 eth1 \n\
+ 172.24.44.15 eth0 \n\
+ 172.24.49.101 ag2 \n\
+ 1 1 Service EVS-Manila Yes Online 172.24.49.32 ag2 \n\
+ 172.24.48.32 ag4 \n\
+ 1 2 Service EVS-Cinder Yes Online 172.24.49.21 ag2 \n\
+ 1 3 Service EVS-Test Yes Online 192.168.100.100 ag2 \n\
+\n"
+
+iscsilu_list = "Name : cinder-lu \n\
+Comment: \n\
+Path : /.cinder/cinder-lu.iscsi \n\
+Size : 2 GB \n\
+File System : fs-cinder \n\
+File System Mounted : YES \n\
+Logical Unit Mounted: No"
+
+iscsilu_list_tb = "Name : test-lu \n\
+Comment: \n\
+Path : /.cinder/test-lu.iscsi \n\
+Size : 2 TB \n\
+File System : fs-cinder \n\
+File System Mounted : YES \n\
+Logical Unit Mounted: No"
+
+add_targetsecret = "Target created successfully."
+
+iscsi_target_list = "\n\
Alias : cinder-GoldIsh\n\
Globally unique name: iqn.2015-06.10.10.10.10:evstest1.cinder-goldish\n\
Comment :\n\
@@ -218,462 +180,607 @@ Alias : cinder-default\n\
Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default\n\
Comment :\n\
Secret : pxr6U37LZZJBoMc\n\
-Authentication : Disabled\n\
+Authentication : Enabled\n\
Logical units : Logical units :\n\
\n\
LUN Logical Unit\n\
---- --------------------------------\n\
- 0 volume-8ddd1a54-9daf-4fa5-842...\n\
+ 0 cinder-lu\n\
1 volume-99da7ae7-1e7f-4d57-8bf...\n\
\n\
Access configuration :\n\
"
-HNAS_RESULT24 = "Logical unit modified successfully."
-HNAS_RESULT25 = "Current selected file system: HNAS-iSCSI-TEST, number(32)."
+backend_opts = {'mgmt_ip0': '0.0.0.0',
+ 'cluster_admin_ip0': None,
+ 'ssh_port': '22',
+ 'username': 'supervisor',
+ 'password': 'supervisor',
+ 'ssh_private_key': 'test_key'}
-HNAS_RESULT26 = "Name : volume-test \n\
-Comment: \n\
-Path : /.cinder/volume-test.iscsi \n\
-Size : 2 GB \n\
-File System : fs1 \n\
-File System Mounted : YES \n\
-Logical Unit Mounted: No"
-
-HNAS_RESULT27 = "Connection reset"
+target_chap_disable = "\n\
+Alias : cinder-default \n\
+Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default \n\
+Comment : \n\
+Secret : \n\
+Authentication : Disabled \n\
+Logical units : No logical units. \n\
+\n\
+ LUN Logical Unit \n\
+ ---- -------------------------------- \n\
+ 0 cinder-lu \n\
+ 1 volume-99da7ae7-1e7f-4d57-8bf... \n\
+\n\
+Access configuration: \n\
+"
-HNAS_CMDS = {
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsfs', 'list'):
- ["%s" % HNAS_RESULT1, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'cluster-getmac',):
- ["%s" % HNAS_RESULT2, ""],
- ('ssh', '-version',): ["%s" % HNAS_RESULT18, ""],
- ('ssh', '-u', 'supervisor', '-p', 'supervisor', '0.0.0.0', 'ver',):
- ["%s" % HNAS_RESULT3, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'ver',):
- ["%s" % HNAS_RESULT3, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsipaddr', '-l'):
- ["%s" % HNAS_RESULT4, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-a'):
- ["%s" % HNAS_RESULT5, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df', '-f', 'test_hdp'):
- ["%s" % HNAS_RESULT6, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'for-each-evs', '-q',
- 'nfs-export', 'list'):
- ["%s" % HNAS_RESULT7, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'console-context', '--evs', '1', 'iscsi-lu', 'add', '-e', 'test_name',
- 'test_hdp', '/.cinder/test_name.iscsi',
- '1M'):
- ["%s" % HNAS_RESULT8, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'console-context', '--evs', '1', 'iscsi-lu', 'del', '-d', '-f',
- 'test_lun'):
- ["%s" % HNAS_RESULT9, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'console-context', '--evs', '1', 'file-clone-create', '-f', 'fs01-husvm',
- '/.cinder/test_lu.iscsi', 'cloned_lu'):
- ["%s" % HNAS_RESULT10, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'console-context', '--evs', '1', 'iscsi-lu', 'expand', 'expanded_lu',
- '1M'):
- ["%s" % HNAS_RESULT11, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'console-context', '--evs', '1', 'iscsi-target', 'list', 'test_iqn'):
- ["%s" % HNAS_RESULT12, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'console-context', '--evs', '1', 'iscsi-target', 'addlu', 'test_iqn',
- 'test_lun', '0'):
- ["%s" % HNAS_RESULT13, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'console-context', '--evs', '1', 'iscsi-target', 'dellu', 'test_iqn',
- 0):
- ["%s" % HNAS_RESULT14, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'console-context', '--evs', '1', 'iscsi-target', 'add', 'myTarget',
- 'secret'):
- ["%s" % HNAS_RESULT15, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'console-context', '--evs', '1', 'iscsi-target', 'mod', '-s',
- 'test_secret', '-a', 'enable', 'test_iqn'): ["%s" % HNAS_RESULT15, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'console-context', '--evs', '1', 'iscsi-lu', 'clone', '-e', 'test_lu',
- 'test_clone',
- '/.cinder/test_clone.iscsi'):
- ["%s" % HNAS_RESULT16, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'evsipaddr', '-e', '1'):
- ["%s" % HNAS_RESULT17, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'console-context', '--evs', '1', 'iscsi-target', 'list'):
- ["%s" % HNAS_RESULT23, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
- '1', 'iscsi-target', 'addlu', 'cinder-default',
- 'volume-8ddd1a54-0000-0000-0000', '2'):
- ["%s" % HNAS_RESULT13, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
- '1', 'selectfs', 'fs01-husvm'):
- ["%s" % HNAS_RESULT25, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
- '1', 'iscsi-lu', 'list', 'test_lun'):
- ["%s" % HNAS_RESULT26, ""],
- ('ssh', '0.0.0.0', 'supervisor', 'supervisor', 'console-context', '--evs',
- '1', 'iscsi-lu', 'mod', '-n', 'vol_test', 'new_vol_test'):
- ["%s" % HNAS_RESULT24, ""]
-}
-
-DRV_CONF = {'ssh_enabled': 'True',
- 'mgmt_ip0': '0.0.0.0',
- 'cluster_admin_ip0': None,
- 'ssh_port': '22',
- 'ssh_private_key': 'test_key',
- 'username': 'supervisor',
- 'password': 'supervisor'}
-
-UTILS_EXEC_OUT = ["output: test_cmd", ""]
-
-
-def m_run_cmd(*args, **kargs):
- return HNAS_CMDS.get(args)
-
-
-class HDSHNASBendTest(test.TestCase):
+class HDSHNASBackendTest(test.TestCase):
def __init__(self, *args, **kwargs):
- super(HDSHNASBendTest, self).__init__(*args, **kwargs)
+ super(HDSHNASBackendTest, self).__init__(*args, **kwargs)
- @mock.patch.object(nfs, 'factory_bend')
- def setUp(self, m_factory_bend):
- super(HDSHNASBendTest, self).setUp()
- self.hnas_bend = hnas_backend.HnasBackend(DRV_CONF)
+ def setUp(self):
+ super(HDSHNASBackendTest, self).setUp()
+ self.hnas_backend = hnas_backend.HNASSSHBackend(backend_opts)
- @mock.patch('six.moves.builtins.open')
- @mock.patch('os.path.isfile', return_value=True)
- @mock.patch('paramiko.RSAKey.from_private_key_file')
- @mock.patch('paramiko.SSHClient')
- @mock.patch.object(putils, 'ssh_execute',
- return_value=(HNAS_RESULT5, ''))
- @mock.patch.object(utils, 'execute')
- @mock.patch.object(time, 'sleep')
- def test_run_cmd(self, m_sleep, m_utl, m_ssh, m_ssh_cli, m_pvt_key,
- m_file, m_open):
- self.flags(ssh_hosts_key_file='/var/lib/cinder/ssh_known_hosts',
- state_path='/var/lib/cinder')
+ def test_run_cmd(self):
+ self.mock_object(os.path, 'isfile',
+ mock.Mock(return_value=True))
+ self.mock_object(utils, 'execute')
+ self.mock_object(time, 'sleep')
+ self.mock_object(paramiko, 'SSHClient')
+ self.mock_object(paramiko.RSAKey, 'from_private_key_file')
+ self.mock_object(putils, 'ssh_execute',
+ mock.Mock(return_value=(df_f, '')))
- # Test main flow
- self.hnas_bend.drv_configs['ssh_enabled'] = 'True'
- out, err = self.hnas_bend.run_cmd('ssh', '0.0.0.0',
- 'supervisor', 'supervisor',
- 'df', '-a')
- self.assertIn('fs01-husvm', out)
+ out, err = self.hnas_backend._run_cmd('ssh', '0.0.0.0',
+ 'supervisor', 'supervisor',
+ 'df', '-a')
+
+ self.assertIn('fs-cinder', out)
self.assertIn('WFS-2,128 DSBs', out)
- # Test exception throwing when not using SSH
- m_utl.side_effect = putils.ProcessExecutionError(stdout='',
- stderr=HNAS_RESULT22,
- exit_code=255)
- self.hnas_bend.drv_configs['ssh_enabled'] = 'False'
- self.assertRaises(exception.HNASConnError, self.hnas_bend.run_cmd,
- 'ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'df', '-a')
+ def test_run_cmd_retry_exception(self):
+ self.hnas_backend.cluster_admin_ip0 = '172.24.44.11'
- m_utl.side_effect = putils.ProcessExecutionError(stdout='',
- stderr=HNAS_RESULT27,
- exit_code=255)
- self.hnas_bend.drv_configs['ssh_enabled'] = 'False'
- self.assertRaises(exception.HNASConnError, self.hnas_bend.run_cmd,
- 'ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'df', '-a')
+ exceptions = [putils.ProcessExecutionError(stderr='Connection reset'),
+ putils.ProcessExecutionError(stderr='Failed to establish'
+ ' SSC connection'),
+ putils.ProcessExecutionError(stderr='Connection reset'),
+ putils.ProcessExecutionError(stderr='Connection reset'),
+ putils.ProcessExecutionError(stderr='Connection reset')]
- # Test exception throwing when using SSH
- m_ssh.side_effect = putils.ProcessExecutionError(stdout='',
- stderr=HNAS_RESULT22,
- exit_code=255)
- self.hnas_bend.drv_configs['ssh_enabled'] = 'True'
- self.assertRaises(exception.HNASConnError, self.hnas_bend.run_cmd,
- 'ssh', '0.0.0.0', 'supervisor', 'supervisor',
- 'df', '-a')
+ self.mock_object(os.path, 'isfile',
+ mock.Mock(return_value=True))
+ self.mock_object(utils, 'execute')
+ self.mock_object(time, 'sleep')
+ self.mock_object(paramiko, 'SSHClient')
+ self.mock_object(paramiko.RSAKey, 'from_private_key_file')
+ self.mock_object(putils, 'ssh_execute',
+ mock.Mock(side_effect=exceptions))
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- side_effect=m_run_cmd)
- @mock.patch.object(utils, 'execute', return_value=UTILS_EXEC_OUT)
- def test_get_version(self, m_cmd, m_exec):
- out = self.hnas_bend.get_version("ssh", "1.0", "0.0.0.0", "supervisor",
- "supervisor")
- self.assertIn('11.2.3319.14', out)
- self.assertIn('83-68-96-AA-DA-5D', out)
+ self.assertRaises(exception.HNASConnError, self.hnas_backend._run_cmd,
+ 'ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df',
+ '-a')
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- side_effect=m_run_cmd)
- def test_get_version_ssh_cluster(self, m_cmd):
- self.hnas_bend.drv_configs['ssh_enabled'] = 'True'
- self.hnas_bend.drv_configs['cluster_admin_ip0'] = '1.1.1.1'
- out = self.hnas_bend.get_version("ssh", "1.0", "0.0.0.0", "supervisor",
- "supervisor")
- self.assertIn('11.2.3319.14', out)
- self.assertIn('83-68-96-AA-DA-5D', out)
+ def test_run_cmd_exception_without_retry(self):
+ self.mock_object(os.path, 'isfile',
+ mock.Mock(return_value=True))
+ self.mock_object(utils, 'execute')
+ self.mock_object(time, 'sleep')
+ self.mock_object(paramiko, 'SSHClient')
+ self.mock_object(paramiko.RSAKey, 'from_private_key_file')
+ self.mock_object(putils, 'ssh_execute',
+ mock.Mock(side_effect=putils.ProcessExecutionError
+ (stderr='Error')))
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- side_effect=m_run_cmd)
- @mock.patch.object(utils, 'execute', return_value=UTILS_EXEC_OUT)
- def test_get_version_ssh_disable(self, m_cmd, m_exec):
- self.hnas_bend.drv_configs['ssh_enabled'] = 'False'
- out = self.hnas_bend.get_version("ssh", "1.0", "0.0.0.0", "supervisor",
- "supervisor")
- self.assertIn('11.2.3319.14', out)
- self.assertIn('83-68-96-AA-DA-5D', out)
- self.assertIn('Utility_version', out)
+ self.assertRaises(putils.ProcessExecutionError,
+ self.hnas_backend._run_cmd, 'ssh', '0.0.0.0',
+ 'supervisor', 'supervisor', 'df', '-a')
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- side_effect=m_run_cmd)
- def test_get_iscsi_info(self, m_execute):
- out = self.hnas_bend.get_iscsi_info("ssh", "0.0.0.0", "supervisor",
- "supervisor")
+ def test_get_targets_empty_list(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=('No targets', '')))
- self.assertIn('172.24.44.20', out)
- self.assertIn('172.24.44.21', out)
- self.assertIn('10.0.0.20', out)
- self.assertEqual(4, len(out.split('\n')))
+ out = self.hnas_backend._get_targets('2')
+ self.assertEqual([], out)
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd')
- def test_get_hdp_info(self, m_run_cmd):
- # tests when there is two or more evs
- m_run_cmd.return_value = (HNAS_RESULT5, "")
- out = self.hnas_bend.get_hdp_info("ssh", "0.0.0.0", "supervisor",
- "supervisor")
+ def test_get_targets_not_found(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(iscsi_target_list, '')))
- self.assertEqual(10, len(out.split('\n')))
- self.assertIn('gold', out)
- self.assertIn('silver', out)
- line1 = out.split('\n')[0]
- self.assertEqual(12, len(line1.split()))
+ out = self.hnas_backend._get_targets('2', 'fake-volume')
+ self.assertEqual([], out)
- # test when there is only one evs
- m_run_cmd.return_value = (HNAS_RESULT19, "")
- out = self.hnas_bend.get_hdp_info("ssh", "0.0.0.0", "supervisor",
- "supervisor")
- self.assertEqual(3, len(out.split('\n')))
- self.assertIn('fs01-husvm', out)
- self.assertIn('manage_test02', out)
- line1 = out.split('\n')[0]
- self.assertEqual(12, len(line1.split()))
+ def test__get_unused_luid_number_0(self):
+ tgt_info = {
+ 'alias': 'cinder-default',
+ 'secret': 'pxr6U37LZZJBoMc',
+ 'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
+ 'lus': [
+ {'id': '1',
+ 'name': 'cinder-lu2'},
+ {'id': '2',
+ 'name': 'volume-test2'}
+ ],
+ 'auth': 'Enabled'
+ }
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- side_effect=m_run_cmd)
- def test_get_nfs_info(self, m_run_cmd):
- out = self.hnas_bend.get_nfs_info("ssh", "0.0.0.0", "supervisor",
- "supervisor")
+ out = self.hnas_backend._get_unused_luid(tgt_info)
- self.assertEqual(2, len(out.split('\n')))
- self.assertIn('/export01-husvm', out)
- self.assertIn('172.24.44.20', out)
- self.assertIn('10.0.0.20', out)
+ self.assertEqual(0, out)
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- side_effect=m_run_cmd)
- def test_create_lu(self, m_cmd):
- out = self.hnas_bend.create_lu("ssh", "0.0.0.0", "supervisor",
- "supervisor", "test_hdp", "1",
- "test_name")
+ def test__get_unused_no_luns(self):
+ tgt_info = {
+ 'alias': 'cinder-default',
+ 'secret': 'pxr6U37LZZJBoMc',
+ 'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
+ 'lus': [],
+ 'auth': 'Enabled'
+ }
- self.assertIn('successfully created', out)
+ out = self.hnas_backend._get_unused_luid(tgt_info)
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- side_effect=m_run_cmd)
- def test_delete_lu(self, m_cmd):
- out = self.hnas_bend.delete_lu("ssh", "0.0.0.0", "supervisor",
- "supervisor", "test_hdp", "test_lun")
+ self.assertEqual(0, out)
- self.assertIn('deleted successfully', out)
+ def test_get_version(self):
+ expected_out = {
+ 'hardware': 'NAS Platform (M2SEKW1339109)',
+ 'mac': '83-68-96-AA-DA-5D',
+ 'version': '11.2.3319.14',
+ 'model': 'HNAS 4040',
+ 'serial': 'B1339745'
+ }
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- side_effect=m_run_cmd)
- def test_create_dup(self, m_cmd):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[
+ (cluster_getmac, ''),
+ (version, '')]))
- out = self.hnas_bend.create_dup("ssh", "0.0.0.0", "supervisor",
- "supervisor", "test_lu", "test_hdp",
- "1", "test_clone")
+ out = self.hnas_backend.get_version()
- self.assertIn('successfully created', out)
+ self.assertEqual(expected_out, out)
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- side_effect=m_run_cmd)
- def test_file_clone(self, m_cmd):
- out = self.hnas_bend.file_clone("ssh", "0.0.0.0", "supervisor",
- "supervisor", "fs01-husvm",
- "/.cinder/test_lu.iscsi", "cloned_lu")
+ def test_get_evs(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(evsfs_list, '')))
- self.assertIn('LUN cloned_lu HDP', out)
+ out = self.hnas_backend.get_evs('fs-cinder')
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- side_effect=m_run_cmd)
- def test_extend_vol(self, m_cmd):
- out = self.hnas_bend.extend_vol("ssh", "0.0.0.0", "supervisor",
- "supervisor", "test_hdp", "test_lun",
- "1", "expanded_lu")
+ self.assertEqual('2', out)
- self.assertIn('successfully extended', out)
+ def test_get_export_list(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(nfs_export, ''),
+ (evsfs_list, ''),
+ (evs_list, '')]))
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- side_effect=m_run_cmd)
- def test_add_iscsi_conn(self, m_cmd):
- out = self.hnas_bend.add_iscsi_conn("ssh", "0.0.0.0", "supervisor",
- "supervisor",
- "volume-8ddd1a54-0000-0000-0000",
- "test_hdp", "test_port",
- "cinder-default", "test_init")
+ out = self.hnas_backend.get_export_list()
- self.assertIn('successfully paired', out)
+ self.assertEqual('fs-cinder', out[0]['fs'])
+ self.assertEqual(250.0, out[0]['size'])
+ self.assertEqual(228.0, out[0]['free'])
+ self.assertEqual('/export01-husvm', out[0]['path'])
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- side_effect=m_run_cmd)
- def test_del_iscsi_conn(self, m_cmd):
- out = self.hnas_bend.del_iscsi_conn("ssh", "0.0.0.0", "supervisor",
- "supervisor", "1", "test_iqn", 0)
+ def test_get_export_list_data_not_available(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(nfs_export_not_available, ''),
+ (evsfs_list, ''),
+ (evs_list, '')]))
- self.assertIn('already deleted', out)
+ out = self.hnas_backend.get_export_list()
- @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=0)
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd')
- def test_get_targetiqn(self, m_cmd, m_get_evs):
+ self.assertEqual('fs-cinder', out[0]['fs'])
+ self.assertEqual('/export01-husvm', out[0]['path'])
+ self.assertEqual(-1, out[0]['size'])
+ self.assertEqual(-1, out[0]['free'])
- m_cmd.side_effect = [[HNAS_RESULT12, '']]
- out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor",
- "supervisor", "test_iqn",
- "test_hdp", "test_secret")
+ def test_get_export_list_tb(self):
+ size = float(250 * 1024)
+ free = float(228 * 1024)
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(nfs_export_tb, ''),
+ (evsfs_list, ''),
+ (evs_list, '')]))
- self.assertEqual('test_iqn', out)
+ out = self.hnas_backend.get_export_list()
- m_cmd.side_effect = [[HNAS_RESULT20, ''], [HNAS_RESULT21, '']]
- out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor",
- "supervisor", "test_iqn2",
- "test_hdp", "test_secret")
+ self.assertEqual('fs-cinder', out[0]['fs'])
+ self.assertEqual(size, out[0]['size'])
+ self.assertEqual(free, out[0]['free'])
+ self.assertEqual('/export01-husvm', out[0]['path'])
- self.assertEqual('test_iqn2', out)
+ def test_file_clone(self):
+ path1 = '/.cinder/path1'
+ path2 = '/.cinder/path2'
- m_cmd.side_effect = [[HNAS_RESULT20, ''], [HNAS_RESULT21, '']]
- out = self.hnas_bend.get_targetiqn("ssh", "0.0.0.0", "supervisor",
- "supervisor", "test_iqn3",
- "test_hdp", "")
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(evsfs_list, '')))
- self.assertEqual('test_iqn3', out)
+ self.hnas_backend.file_clone('fs-cinder', path1, path2)
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- side_effect=m_run_cmd)
- def test_set_targetsecret(self, m_execute):
- self.hnas_bend.set_targetsecret("ssh", "0.0.0.0", "supervisor",
- "supervisor", "test_iqn",
- "test_hdp", "test_secret")
+ calls = [mock.call('evsfs', 'list'), mock.call('console-context',
+ '--evs', '2',
+ 'file-clone-create',
+ '-f', 'fs-cinder',
+ path1, path2)]
+ self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd')
- def test_get_targetsecret(self, m_run_cmd):
- # test when target has secret
- m_run_cmd.return_value = (HNAS_RESULT12, "")
- out = self.hnas_bend.get_targetsecret("ssh", "0.0.0.0", "supervisor",
- "supervisor", "test_iqn",
- "test_hdp")
+ def test_file_clone_wrong_fs(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(evsfs_list, '')))
- self.assertEqual('test_secret', out)
+ self.assertRaises(exception.InvalidParameterValue,
+ self.hnas_backend.file_clone, 'fs-fake', 'src',
+ 'dst')
+
+ def test_get_evs_info(self):
+ expected_out = {'evs_number': '1'}
+ expected_out2 = {'evs_number': '2'}
+
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(evsipaddr, '')))
+
+ out = self.hnas_backend.get_evs_info()
+
+ self.hnas_backend._run_cmd.assert_called_with('evsipaddr', '-l')
+ self.assertEqual(expected_out, out['10.0.0.20'])
+ self.assertEqual(expected_out, out['172.24.44.20'])
+ self.assertEqual(expected_out2, out['172.24.44.21'])
+
+ def test_get_fs_info(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(df_f, '')))
+
+ out = self.hnas_backend.get_fs_info('fs-cinder')
+
+ self.assertEqual('2', out['evs_id'])
+ self.assertEqual('fs-cinder', out['label'])
+ self.assertEqual('228', out['available_size'])
+ self.assertEqual('250', out['total_size'])
+ self.hnas_backend._run_cmd.assert_called_with('df', '-af', 'fs-cinder')
+
+ def test_get_fs_empty_return(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=('Not mounted', '')))
+
+ out = self.hnas_backend.get_fs_info('fs-cinder')
+ self.assertEqual({}, out)
+
+ def test_get_fs_info_single_evs(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(df_f_single_evs, '')))
+
+ out = self.hnas_backend.get_fs_info('fs-cinder')
+
+ self.assertEqual('fs-cinder', out['label'])
+ self.assertEqual('228', out['available_size'])
+ self.assertEqual('250', out['total_size'])
+ self.hnas_backend._run_cmd.assert_called_with('df', '-af', 'fs-cinder')
+
+ def test_get_fs_tb(self):
+ available_size = float(228 * 1024 ** 2)
+ total_size = float(250 * 1024 ** 2)
+
+ df_f_tb = "\n\
+ID Label EVS Size Used Snapshots Deduped Avail \
+Thin ThinSize ThinAvail FS Type\n\
+---- ---------- --- ------ ------------ --------- ------- ------------ \
+---- -------- --------- --------------------\n\
+1025 fs-cinder 2 250 TB 21.4 TB (9%) 0 B (0%) NA 228 TB (91%) \
+ No 32 KB,WFS-2,128 DSBs\n\
+\n"
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(df_f_tb, '')))
+
+ out = self.hnas_backend.get_fs_info('fs-cinder')
+
+ self.assertEqual('2', out['evs_id'])
+ self.assertEqual('fs-cinder', out['label'])
+ self.assertEqual(str(available_size), out['available_size'])
+ self.assertEqual(str(total_size), out['total_size'])
+ self.hnas_backend._run_cmd.assert_called_with('df', '-af', 'fs-cinder')
+
+ def test_get_fs_single_evs_tb(self):
+ available_size = float(228 * 1024 ** 2)
+ total_size = float(250 * 1024 ** 2)
+
+ df_f_tb = "\n\
+ID Label Size Used Snapshots Deduped Avail \
+Thin ThinSize ThinAvail FS Type\n\
+---- ---------- ------ ------------ --------- ------- ------------ \
+---- -------- --------- --------------------\n\
+1025 fs-cinder 250 TB 21.4 TB (9%) 0 B (0%) NA 228 TB (91%) \
+ No 32 KB,WFS-2,128 DSBs\n\
+\n"
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(df_f_tb, '')))
+
+ out = self.hnas_backend.get_fs_info('fs-cinder')
+
+ self.assertEqual('fs-cinder', out['label'])
+ self.assertEqual(str(available_size), out['available_size'])
+ self.assertEqual(str(total_size), out['total_size'])
+ self.hnas_backend._run_cmd.assert_called_with('df', '-af', 'fs-cinder')
+
+ def test_create_lu(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(evsfs_list, '')))
+
+ self.hnas_backend.create_lu('fs-cinder', '128', 'cinder-lu')
+
+ calls = [mock.call('evsfs', 'list'), mock.call('console-context',
+ '--evs', '2',
+ 'iscsi-lu', 'add',
+ '-e', 'cinder-lu',
+ 'fs-cinder',
+ '/.cinder/cinder-lu.'
+ 'iscsi', '128G')]
+ self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
+
+ def test_delete_lu(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(evsfs_list, '')))
+
+ self.hnas_backend.delete_lu('fs-cinder', 'cinder-lu')
+
+ calls = [mock.call('evsfs', 'list'), mock.call('console-context',
+ '--evs', '2',
+ 'iscsi-lu', 'del', '-d',
+ '-f', 'cinder-lu')]
+ self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
+
+ def test_extend_lu(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(evsfs_list, '')))
+
+ self.hnas_backend.extend_lu('fs-cinder', '128', 'cinder-lu')
+
+ calls = [mock.call('evsfs', 'list'), mock.call('console-context',
+ '--evs', '2',
+ 'iscsi-lu', 'expand',
+ 'cinder-lu', '128G')]
+ self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
+
+ def test_cloned_lu(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(evsfs_list, '')))
+
+ self.hnas_backend.create_cloned_lu('cinder-lu', 'fs-cinder', 'snap')
+
+ calls = [mock.call('evsfs', 'list'), mock.call('console-context',
+ '--evs', '2',
+ 'iscsi-lu', 'clone',
+ '-e', 'cinder-lu',
+ 'snap',
+ '/.cinder/snap.iscsi')]
+ self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
+
+ def test_get_existing_lu_info(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(evsfs_list, ''),
+ (iscsilu_list, '')]))
+
+ out = self.hnas_backend.get_existing_lu_info('cinder-lu', None, None)
+
+ self.assertEqual('cinder-lu', out['name'])
+ self.assertEqual('fs-cinder', out['filesystem'])
+ self.assertEqual(2.0, out['size'])
+
+ def test_get_existing_lu_info_tb(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(evsfs_list, ''),
+ (iscsilu_list_tb, '')]))
+
+ out = self.hnas_backend.get_existing_lu_info('test-lu', None, None)
+
+ self.assertEqual('test-lu', out['name'])
+ self.assertEqual('fs-cinder', out['filesystem'])
+ self.assertEqual(2048.0, out['size'])
+
+ def test_rename_existing_lu(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(evsfs_list, '')))
+ self.hnas_backend.rename_existing_lu('fs-cinder', 'cinder-lu',
+ 'new-lu-name')
+
+ calls = [mock.call('evsfs', 'list'), mock.call('console-context',
+ '--evs', '2',
+ 'iscsi-lu', 'mod', '-n',
+ "'new-lu-name'",
+ 'cinder-lu')]
+ self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
+
+ def test_check_lu(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(evsfs_list, ''),
+ (iscsi_target_list, '')]))
+
+ out = self.hnas_backend.check_lu('cinder-lu', 'fs-cinder')
+
+ self.assertEqual('cinder-lu', out['tgt']['lus'][0]['name'])
+ self.assertEqual('pxr6U37LZZJBoMc', out['tgt']['secret'])
+ self.assertTrue(out['mapped'])
+ calls = [mock.call('evsfs', 'list'), mock.call('console-context',
+ '--evs', '2',
+ 'iscsi-target', 'list')]
+ self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
+
+ def test_check_lu_not_found(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(evsfs_list, ''),
+ (iscsi_target_list, '')]))
+
+        # passing a fake volume name that is not mapped to any target
+ out = self.hnas_backend.check_lu('fake-volume', 'fs-cinder')
+ self.assertFalse(out['mapped'])
+ self.assertEqual(0, out['id'])
+ self.assertIsNone(out['tgt'])
+
+ def test_add_iscsi_conn(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(evsfs_list, ''),
+ (iscsi_target_list, ''),
+ (evsfs_list, '')]))
+
+ out = self.hnas_backend.add_iscsi_conn('cinder-lu', 'fs-cinder', 3260,
+ 'cinder-default', 'initiator')
+
+ self.assertEqual('cinder-lu', out['lu_name'])
+ self.assertEqual('fs-cinder', out['fs'])
+ self.assertEqual('0', out['lu_id'])
+ self.assertEqual(3260, out['port'])
+ calls = [mock.call('evsfs', 'list'),
+ mock.call('console-context', '--evs', '2', 'iscsi-target',
+ 'list')]
+ self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
+
+ def test_add_iscsi_conn_not_mapped_volume(self):
+ not_mapped = {'mapped': False,
+ 'id': 0,
+ 'tgt': None}
+
+ self.mock_object(self.hnas_backend, 'check_lu',
+ mock.Mock(return_value=not_mapped))
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(evsfs_list, ''),
+ (iscsi_target_list, ''),
+ ('', '')]))
+
+ out = self.hnas_backend.add_iscsi_conn('cinder-lu', 'fs-cinder', 3260,
+ 'cinder-default', 'initiator')
+
+ self.assertEqual('cinder-lu', out['lu_name'])
+ self.assertEqual('fs-cinder', out['fs'])
+ self.assertEqual(2, out['lu_id'])
+ self.assertEqual(3260, out['port'])
+ calls = [mock.call('evsfs', 'list'),
+ mock.call('console-context', '--evs', '2', 'iscsi-target',
+ 'list')]
+ self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
+
+ def test_del_iscsi_conn(self):
+ iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-default'
+
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(iscsi_one_target, '')))
+
+ self.hnas_backend.del_iscsi_conn('2', iqn, '0')
+
+ calls = [mock.call('console-context', '--evs', '2', 'iscsi-target',
+ 'list', iqn),
+ mock.call('console-context', '--evs', '2', 'iscsi-target',
+ 'dellu', '-f', iqn, '0')]
+ self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
+
+ def test_del_iscsi_conn_volume_not_found(self):
+ iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-fake'
+
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(iscsi_one_target, '')))
+
+ self.hnas_backend.del_iscsi_conn('2', iqn, '10')
+
+ self.hnas_backend._run_cmd.assert_called_with('console-context',
+ '--evs', '2',
+ 'iscsi-target', 'list',
+ iqn)
+
+ def test_check_target(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(evsfs_list, ''),
+ (iscsi_target_list, '')]))
+
+ out = self.hnas_backend.check_target('fs-cinder', 'cinder-default')
+
+ self.assertTrue(out['found'])
+ self.assertEqual('cinder-lu', out['tgt']['lus'][0]['name'])
+ self.assertEqual('cinder-default', out['tgt']['alias'])
+ self.assertEqual('pxr6U37LZZJBoMc', out['tgt']['secret'])
+
+ def test_check_target_not_found(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(evsfs_list, ''),
+ (iscsi_target_list, '')]))
+
+ out = self.hnas_backend.check_target('fs-cinder', 'cinder-fake')
+
+ self.assertFalse(out['found'])
+ self.assertIsNone(out['tgt'])
+
+ def test_set_target_secret(self):
+ targetalias = 'cinder-default'
+ secret = 'pxr6U37LZZJBoMc'
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(evsfs_list, '')))
+
+ self.hnas_backend.set_target_secret(targetalias, 'fs-cinder', secret)
+
+ calls = [mock.call('evsfs', 'list'),
+ mock.call('console-context', '--evs', '2', 'iscsi-target',
+ 'mod', '-s', 'pxr6U37LZZJBoMc', '-a', 'enable',
+ 'cinder-default')]
+ self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
+
+ def test_set_target_secret_empty_target_list(self):
+ targetalias = 'cinder-default'
+ secret = 'pxr6U37LZZJBoMc'
+
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(evsfs_list, ''),
+ ('does not exist', ''),
+ ('', '')]))
+
+ self.hnas_backend.set_target_secret(targetalias, 'fs-cinder', secret)
+
+ calls = [mock.call('console-context', '--evs', '2', 'iscsi-target',
+ 'mod', '-s', 'pxr6U37LZZJBoMc', '-a', 'enable',
+ 'cinder-default')]
+ self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
+
+ def test_get_target_secret(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(evsfs_list, ''),
+ (iscsi_one_target, '')]))
+ out = self.hnas_backend.get_target_secret('cinder-default',
+ 'fs-cinder')
+
+ self.assertEqual('pxr6U37LZZJBoMc', out)
+
+ self.hnas_backend._run_cmd.assert_called_with('console-context',
+ '--evs', '2',
+ 'iscsi-target', 'list',
+ 'cinder-default')
+
+ def test_get_target_secret_chap_disabled(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(evsfs_list, ''),
+ (target_chap_disable, '')]))
+ out = self.hnas_backend.get_target_secret('cinder-default',
+ 'fs-cinder')
- # test when target don't have secret
- m_run_cmd.return_value = (HNAS_RESULT20, "")
- out = self.hnas_bend.get_targetsecret("ssh", "0.0.0.0", "supervisor",
- "supervisor", "test_iqn",
- "test_hdp")
self.assertEqual('', out)
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd')
- def test_get_targets(self, m_run_cmd):
- # Test normal behaviour
- m_run_cmd.return_value = (HNAS_RESULT23, "")
- tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor",
- "supervisor", 1)
- self.assertEqual(3, len(tgt_list))
- self.assertEqual(2, len(tgt_list[2]['luns']))
+ self.hnas_backend._run_cmd.assert_called_with('console-context',
+ '--evs', '2',
+ 'iscsi-target', 'list',
+ 'cinder-default')
- # Test calling with parameter
- tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor",
- "supervisor", 1,
- 'cinder-default')
- self.assertEqual(1, len(tgt_list))
- self.assertEqual(2, len(tgt_list[0]['luns']))
+ def test_get_target_iqn(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(side_effect=[(evsfs_list, ''),
+ (iscsi_one_target, ''),
+ (add_targetsecret, '')]))
- # Test error in BE command
- m_run_cmd.side_effect = putils.ProcessExecutionError
- tgt_list = self.hnas_bend._get_targets("ssh", "0.0.0.0", "supervisor",
- "supervisor", 1)
- self.assertEqual(0, len(tgt_list))
+ out = self.hnas_backend.get_target_iqn('cinder-default', 'fs-cinder')
- @mock.patch.object(hnas_backend.HnasBackend,
- 'run_cmd', side_effect=m_run_cmd)
- def test_check_targets(self, m_run_cmd):
- result, tgt = self.hnas_bend.check_target("ssh", "0.0.0.0",
- "supervisor",
- "supervisor", "test_hdp",
- "cinder-default")
- self.assertTrue(result)
- self.assertEqual('cinder-default', tgt['alias'])
+ self.assertEqual('iqn.2014-12.10.10.10.10:evstest1.cinder-default',
+ out)
- result, tgt = self.hnas_bend.check_target("ssh", "0.0.0.0",
- "supervisor",
- "supervisor", "test_hdp",
- "cinder-no-target")
- self.assertFalse(result)
- self.assertIsNone(tgt)
+ def test_create_target(self):
+ self.mock_object(self.hnas_backend, '_run_cmd',
+ mock.Mock(return_value=(evsfs_list, '')))
- @mock.patch.object(hnas_backend.HnasBackend,
- 'run_cmd', side_effect=m_run_cmd)
- def test_check_lu(self, m_run_cmd):
- ret = self.hnas_bend.check_lu("ssh", "0.0.0.0", "supervisor",
- "supervisor",
- "volume-8ddd1a54-9daf-4fa5-842",
- "test_hdp")
- result, lunid, tgt = ret
- self.assertTrue(result)
- self.assertEqual('0', lunid)
-
- ret = self.hnas_bend.check_lu("ssh", "0.0.0.0", "supervisor",
- "supervisor",
- "volume-8ddd1a54-0000-0000-000",
- "test_hdp")
- result, lunid, tgt = ret
- self.assertFalse(result)
-
- @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=1)
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- return_value = (HNAS_RESULT26, ""))
- def test_get_existing_lu_info(self, m_run_cmd, m_get_evs):
-
- out = self.hnas_bend.get_existing_lu_info("ssh", "0.0.0.0",
- "supervisor",
- "supervisor", "fs01-husvm",
- "test_lun")
-
- m_get_evs.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
- 'supervisor', 'fs01-husvm')
- m_run_cmd.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
- 'supervisor', 'console-context',
- '--evs', 1, 'iscsi-lu', 'list',
- 'test_lun')
-
- self.assertEqual(HNAS_RESULT26, out)
-
- @mock.patch.object(hnas_backend.HnasBackend, 'get_evs', return_value=1)
- @mock.patch.object(hnas_backend.HnasBackend, 'run_cmd',
- return_value=(HNAS_RESULT24, ""))
- def test_rename_existing_lu(self, m_run_cmd, m_get_evs):
-
- out = self.hnas_bend.rename_existing_lu("ssh", "0.0.0.0",
- "supervisor",
- "supervisor", "fs01-husvm",
- "vol_test",
- "new_vol_test")
-
- m_get_evs.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
- 'supervisor', 'fs01-husvm')
- m_run_cmd.assert_called_once_with('ssh', '0.0.0.0', 'supervisor',
- 'supervisor', 'console-context',
- '--evs', 1, 'iscsi-lu', 'mod',
- '-n', 'vol_test', 'new_vol_test')
-
- self.assertEqual(HNAS_RESULT24, out)
+ self.hnas_backend.create_target('cinder-default', 'fs-cinder',
+ 'pxr6U37LZZJBoMc')
diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_iscsi.py
index 9db1bec5c7e..b50ab255e6a 100644
--- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_iscsi.py
+++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_iscsi.py
@@ -14,563 +14,560 @@
# under the License.
#
-"""
-Self test for Hitachi Unified Storage (HUS-HNAS) platform.
-"""
-
-import os
-import tempfile
-import time
-
import mock
-from oslo_concurrency import processutils as putils
-import six
+from oslo_concurrency import processutils as putils
+from oslo_config import cfg
+
+from cinder import context
from cinder import exception
from cinder import test
+from cinder.tests.unit import fake_constants as fake
+from cinder.tests.unit import fake_snapshot
+from cinder.tests.unit import fake_volume
from cinder.volume import configuration as conf
+from cinder.volume.drivers.hitachi.hnas_backend import HNASSSHBackend
from cinder.volume.drivers.hitachi import hnas_iscsi as iscsi
+from cinder.volume.drivers.hitachi import hnas_utils
from cinder.volume import volume_types
-HNASCONF = """
-
- ssc
- True
- 172.17.44.15
- supervisor
- supervisor
-
- default
- 172.17.39.132
- fs2
-
-
- silver
- 172.17.39.133
- fs2
-
-
-"""
-
-HNAS_WRONG_CONF1 = """
-
- ssc
- 172.17.44.15
- supervisor
- supervisor
- default
- 172.17.39.132:/cinder
-
-
-"""
-
-HNAS_WRONG_CONF2 = """
-
- ssc
- 172.17.44.15
- supervisor
- supervisor
-
- default
-
-
- silver
-
-
-"""
+CONF = cfg.CONF
# The following information is passed on to tests, when creating a volume
-_VOLUME = {'name': 'testvol', 'volume_id': '1234567890', 'size': 128,
- 'volume_type': 'silver', 'volume_type_id': '1',
- 'provider_location': '83-68-96-AA-DA-5D.volume-2dfe280e-470a-4182'
- '-afb8-1755025c35b8', 'id': 'abcdefg',
- 'host': 'host1@hnas-iscsi-backend#silver'}
+_VOLUME = {'name': 'volume-cinder',
+ 'id': fake.VOLUME_ID,
+ 'size': 128,
+ 'host': 'host1@hnas-iscsi-backend#default',
+ 'provider_location': '83-68-96-AA-DA-5D.volume-2dfe280e-470a-'
+ '4182-afb8-1755025c35b8'}
+_VOLUME2 = {'name': 'volume-clone',
+ 'id': fake.VOLUME2_ID,
+ 'size': 150,
+ 'host': 'host1@hnas-iscsi-backend#default',
+ 'provider_location': '83-68-96-AA-DA-5D.volume-8fe1802a-316b-'
+ '5237-1c57-c35b81755025'}
-class SimulatedHnasBackend(object):
- """Simulation Back end. Talks to HNAS."""
-
- # these attributes are shared across object instances
- start_lun = 0
- init_index = 0
- target_index = 0
- hlun = 0
-
- def __init__(self):
- self.type = 'HNAS'
- self.out = ''
- self.volumes = []
- # iSCSI connections
- self.connections = []
-
- def rename_existing_lu(self, cmd, ip0, user, pw, fslabel,
- vol_name, vol_ref_name):
- return 'Logical unit modified successfully.'
-
- def get_existing_lu_info(self, cmd, ip0, user, pw, fslabel, lun):
- out = "Name : volume-test \n\
- Comment: \n\
- Path : /.cinder/volume-test.iscsi \n\
- Size : 20 GB \n\
- File System : manage_iscsi_test \n\
- File System Mounted : Yes \n\
- Logical Unit Mounted: Yes"
- return out
-
- def deleteVolume(self, name):
- volume = self.getVolume(name)
- if volume:
- self.volumes.remove(volume)
- return True
- else:
- return False
-
- def deleteVolumebyProvider(self, provider):
- volume = self.getVolumebyProvider(provider)
- if volume:
- self.volumes.remove(volume)
- return True
- else:
- return False
-
- def getVolumes(self):
- return self.volumes
-
- def getVolume(self, name):
- if self.volumes:
- for volume in self.volumes:
- if str(volume['name']) == name:
- return volume
- return None
-
- def getVolumebyProvider(self, provider):
- if self.volumes:
- for volume in self.volumes:
- if str(volume['provider_location']) == provider:
- return volume
- return None
-
- def createVolume(self, name, provider, sizeMiB, comment):
- new_vol = {'additionalStates': [],
- 'adminSpace': {'freeMiB': 0,
- 'rawReservedMiB': 384,
- 'reservedMiB': 128,
- 'usedMiB': 128},
- 'baseId': 115,
- 'copyType': 1,
- 'creationTime8601': '2012-10-22T16:37:57-07:00',
- 'creationTimeSec': 1350949077,
- 'failedStates': [],
- 'id': 115,
- 'provider_location': provider,
- 'name': name,
- 'comment': comment,
- 'provisioningType': 1,
- 'readOnly': False,
- 'sizeMiB': sizeMiB,
- 'state': 1,
- 'userSpace': {'freeMiB': 0,
- 'rawReservedMiB': 41984,
- 'reservedMiB': 31488,
- 'usedMiB': 31488},
- 'usrSpcAllocLimitPct': 0,
- 'usrSpcAllocWarningPct': 0,
- 'uuid': '1e7daee4-49f4-4d07-9ab8-2b6a4319e243',
- 'wwn': '50002AC00073383D'}
- self.volumes.append(new_vol)
-
- def create_lu(self, cmd, ip0, user, pw, hdp, size, name):
- vol_id = name
- _out = ("LUN: %d HDP: fs2 size: %s MB, is successfully created" %
- (self.start_lun, size))
- self.createVolume(name, vol_id, size, "create-lu")
- self.start_lun += 1
- return _out
-
- def delete_lu(self, cmd, ip0, user, pw, hdp, lun):
- _out = ""
- id = "myID"
-
- self.deleteVolumebyProvider(id + '.' + str(lun))
- return _out
-
- def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name):
- _out = ("LUN: %s HDP: 9 size: %s MB, is successfully created" %
- (self.start_lun, size))
-
- id = name
- self.createVolume(name, id + '.' + str(self.start_lun), size,
- "create-dup")
- self.start_lun += 1
- return _out
-
- def add_iscsi_conn(self, cmd, ip0, user, pw, lun, hdp,
- port, iqn, initiator):
- ctl = ""
- conn = (self.hlun, lun, initiator, self.init_index, iqn,
- self.target_index, ctl, port)
- _out = ("H-LUN: %d mapped. LUN: %s, iSCSI Initiator: %s @ index: %d, \
- and Target: %s @ index %d is successfully paired @ CTL: %s, \
- Port: %s" % conn)
- self.init_index += 1
- self.target_index += 1
- self.hlun += 1
- self.connections.append(conn)
- return _out
-
- def del_iscsi_conn(self, cmd, ip0, user, pw, port, iqn, initiator):
-
- self.connections.pop()
-
- _out = ("H-LUN: successfully deleted from target")
- return _out
-
- def extend_vol(self, cmd, ip0, user, pw, hdp, lu, size, name):
- _out = ("LUN: %s successfully extended to %s MB" % (lu, size))
- id = name
- self.out = _out
- v = self.getVolumebyProvider(id + '.' + str(lu))
- if v:
- v['sizeMiB'] = size
- return _out
-
- def get_luns(self):
- return len(self.alloc_lun)
-
- def get_conns(self):
- return len(self.connections)
-
- def get_out(self):
- return str(self.out)
-
- def get_version(self, cmd, ver, ip0, user, pw):
- self.out = "Array_ID: 18-48-A5-A1-80-13 (3080-G2) " \
- "version: 11.2.3319.09 LU: 256" \
- " RG: 0 RG_LU: 0 Utility_version: 11.1.3225.01"
- return self.out
-
- def get_iscsi_info(self, cmd, ip0, user, pw):
- self.out = "CTL: 0 Port: 4 IP: 172.17.39.132 Port: 3260 Link: Up\n" \
- "CTL: 1 Port: 5 IP: 172.17.39.133 Port: 3260 Link: Up"
- return self.out
-
- def get_hdp_info(self, cmd, ip0, user, pw, fslabel=None):
- self.out = "HDP: 1024 272384 MB 33792 MB 12 % LUs: " \
- "70 Normal fs1\n" \
- "HDP: 1025 546816 MB 73728 MB 13 % LUs: 194 Normal fs2"
- return self.out
-
- def get_targetiqn(self, cmd, ip0, user, pw, id, hdp, secret):
- self.out = """iqn.2013-08.cinderdomain:vs61.cindertarget"""
- return self.out
-
- def set_targetsecret(self, cmd, ip0, user, pw, target, hdp, secret):
- self.out = """iqn.2013-08.cinderdomain:vs61.cindertarget"""
- return self.out
-
- def get_targetsecret(self, cmd, ip0, user, pw, target, hdp):
- self.out = """wGkJhTpXaaYJ5Rv"""
- return self.out
-
- def get_evs(self, cmd, ip0, user, pw, fsid):
- return '1'
-
- def check_lu(self, cmd, ip0, user, pw, volume_name, hdp):
- return True, 1, {'alias': 'cinder-default', 'secret': 'mysecret',
- 'iqn': 'iqn.1993-08.org.debian:01:11f90746eb2'}
-
- def check_target(self, cmd, ip0, user, pw, hdp, target_alias):
- return False, None
+_SNAPSHOT = {
+ 'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc',
+ 'id': fake.SNAPSHOT_ID,
+ 'size': 128,
+ 'volume_type': None,
+ 'provider_location': None,
+ 'volume_size': 128,
+ 'volume': _VOLUME,
+ 'volume_name': _VOLUME['name'],
+ 'host': 'host1@hnas-iscsi-backend#silver',
+ 'volume_type_id': fake.VOLUME_TYPE_ID,
+}
class HNASiSCSIDriverTest(test.TestCase):
"""Test HNAS iSCSI volume driver."""
- def __init__(self, *args, **kwargs):
- super(HNASiSCSIDriverTest, self).__init__(*args, **kwargs)
-
- @mock.patch.object(iscsi, 'factory_bend')
- def setUp(self, _factory_bend):
+ def setUp(self):
super(HNASiSCSIDriverTest, self).setUp()
+ self.context = context.get_admin_context()
+ self.volume = fake_volume.fake_volume_obj(
+ self.context, **_VOLUME)
+ self.volume_clone = fake_volume.fake_volume_obj(
+ self.context, **_VOLUME2)
+        self.snapshot = self.instantiate_snapshot(_SNAPSHOT)
- self.backend = SimulatedHnasBackend()
- _factory_bend.return_value = self.backend
+ self.volume_type = fake_volume.fake_volume_type_obj(
+ None,
+ **{'name': 'silver'}
+ )
- self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml')
- self.addCleanup(self.config_file.close)
- self.config_file.write(HNASCONF)
- self.config_file.flush()
+ self.parsed_xml = {
+ 'username': 'supervisor',
+ 'password': 'supervisor',
+ 'hnas_cmd': 'ssc',
+ 'fs': {'fs2': 'fs2'},
+ 'ssh_port': '22',
+ 'port': '3260',
+ 'services': {
+ 'default': {
+ 'hdp': 'fs2',
+ 'iscsi_ip': '172.17.39.132',
+ 'iscsi_port': '3260',
+ 'port': '22',
+ 'volume_type': 'default',
+ 'label': 'svc_0',
+ 'evs': '1',
+ 'tgt': {
+ 'alias': 'test',
+ 'secret': 'itEpgB5gPefGhW2'
+ }
+ },
+ 'silver': {
+ 'hdp': 'fs3',
+ 'iscsi_ip': '172.17.39.133',
+ 'iscsi_port': '3260',
+ 'port': '22',
+ 'volume_type': 'silver',
+ 'label': 'svc_1',
+ 'evs': '2',
+ 'tgt': {
+ 'alias': 'iscsi-test',
+ 'secret': 'itEpgB5gPefGhW2'
+ }
+ }
+ },
+ 'cluster_admin_ip0': None,
+ 'ssh_private_key': None,
+ 'chap_enabled': 'True',
+ 'mgmt_ip0': '172.17.44.15',
+ 'ssh_enabled': None
+ }
self.configuration = mock.Mock(spec=conf.Configuration)
- self.configuration.hds_hnas_iscsi_config_file = self.config_file.name
- self.configuration.hds_svc_iscsi_chap_enabled = True
- self.driver = iscsi.HDSISCSIDriver(configuration=self.configuration)
- self.driver.do_setup("")
+ self.configuration.hds_hnas_iscsi_config_file = 'fake.xml'
- def _create_volume(self):
- loc = self.driver.create_volume(_VOLUME)
- vol = _VOLUME.copy()
- vol['provider_location'] = loc['provider_location']
- return vol
+ self.mock_object(hnas_utils, 'read_config',
+ mock.Mock(return_value=self.parsed_xml))
- @mock.patch('six.moves.builtins.open')
- @mock.patch.object(os, 'access')
- def test_read_config(self, m_access, m_open):
- # Test exception when file is not found
- m_access.return_value = False
- m_open.return_value = six.StringIO(HNASCONF)
- self.assertRaises(exception.NotFound, iscsi._read_config, '')
+ self.driver = iscsi.HNASISCSIDriver(configuration=self.configuration)
- # Test exception when config file has parsing errors
- # due to missing tag
- m_access.return_value = True
- m_open.return_value = six.StringIO(HNAS_WRONG_CONF1)
- self.assertRaises(exception.ConfigNotFound, iscsi._read_config, '')
+ @staticmethod
+ def instantiate_snapshot(snap):
+ snap = snap.copy()
+ snap['volume'] = fake_volume.fake_volume_obj(
+ None, **snap['volume'])
+ snapshot = fake_snapshot.fake_snapshot_obj(
+ None, expected_attrs=['volume'], **snap)
+ return snapshot
- # Test exception when config file has parsing errors
- # due to missing tag
- m_open.return_value = six.StringIO(HNAS_WRONG_CONF2)
- self.configuration.hds_hnas_iscsi_config_file = ''
- self.assertRaises(exception.ParameterNotFound, iscsi._read_config, '')
+ def test_get_service_target_chap_enabled(self):
+ lu_info = {'mapped': False,
+ 'id': 1,
+ 'tgt': {'alias': 'iscsi-test',
+ 'secret': 'itEpgB5gPefGhW2'}}
+ tgt = {'found': True,
+ 'tgt': {
+ 'alias': 'cinder-default',
+ 'secret': 'pxr6U37LZZJBoMc',
+ 'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
+ 'lus': [
+ {'id': '0',
+ 'name': 'cinder-lu'},
+ {'id': '1',
+ 'name': 'volume-99da7ae7-1e7f-4d57-8bf...'}
+ ],
+ 'auth': 'Enabled'}}
+ iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-default'
- def test_create_volume(self):
- loc = self.driver.create_volume(_VOLUME)
- self.assertNotEqual(loc, None)
- self.assertNotEqual(loc['provider_location'], None)
- # cleanup
- self.backend.deleteVolumebyProvider(loc['provider_location'])
+ self.mock_object(HNASSSHBackend, 'get_evs',
+ mock.Mock(return_value='1'))
+ self.mock_object(HNASSSHBackend, 'check_lu',
+ mock.Mock(return_value=lu_info))
+ self.mock_object(HNASSSHBackend, 'check_target',
+ mock.Mock(return_value=tgt))
+ self.mock_object(HNASSSHBackend, 'get_target_secret',
+ mock.Mock(return_value=''))
+ self.mock_object(HNASSSHBackend, 'set_target_secret')
+ self.mock_object(HNASSSHBackend, 'get_target_iqn',
+ mock.Mock(return_value=iqn))
- def test_get_volume_stats(self):
- stats = self.driver.get_volume_stats(True)
- self.assertEqual("HDS", stats["vendor_name"])
- self.assertEqual("iSCSI", stats["storage_protocol"])
- self.assertEqual(2, len(stats['pools']))
+ self.driver._get_service_target(self.volume)
- def test_delete_volume(self):
- vol = self._create_volume()
- self.driver.delete_volume(vol)
- # should not be deletable twice
- prov_loc = self.backend.getVolumebyProvider(vol['provider_location'])
- self.assertIsNone(prov_loc)
+ def test_get_service_target_chap_disabled(self):
+ lu_info = {'mapped': False,
+ 'id': 1,
+ 'tgt': {'alias': 'iscsi-test',
+ 'secret': 'itEpgB5gPefGhW2'}}
+ tgt = {'found': False,
+ 'tgt': {
+ 'alias': 'cinder-default',
+ 'secret': 'pxr6U37LZZJBoMc',
+ 'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
+ 'lus': [
+ {'id': '0',
+ 'name': 'cinder-lu'},
+ {'id': '1',
+ 'name': 'volume-99da7ae7-1e7f-4d57-8bf...'}
+ ],
+ 'auth': 'Enabled'}}
+ iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-default'
- def test_extend_volume(self):
- vol = self._create_volume()
- new_size = _VOLUME['size'] * 2
- self.driver.extend_volume(vol, new_size)
- # cleanup
- self.backend.deleteVolumebyProvider(vol['provider_location'])
-
- @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol')
- def test_create_snapshot(self, m_id_to_vol):
- vol = self._create_volume()
- m_id_to_vol.return_value = vol
- svol = vol.copy()
- svol['volume_size'] = svol['size']
- loc = self.driver.create_snapshot(svol)
- self.assertNotEqual(loc, None)
- svol['provider_location'] = loc['provider_location']
- # cleanup
- self.backend.deleteVolumebyProvider(svol['provider_location'])
- self.backend.deleteVolumebyProvider(vol['provider_location'])
-
- @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol')
- def test_create_clone(self, m_id_to_vol):
-
- src_vol = self._create_volume()
- m_id_to_vol.return_value = src_vol
- src_vol['volume_size'] = src_vol['size']
-
- dst_vol = self._create_volume()
- dst_vol['volume_size'] = dst_vol['size']
-
- loc = self.driver.create_cloned_volume(dst_vol, src_vol)
- self.assertNotEqual(loc, None)
- # cleanup
- self.backend.deleteVolumebyProvider(src_vol['provider_location'])
- self.backend.deleteVolumebyProvider(loc['provider_location'])
-
- @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol')
- @mock.patch.object(iscsi.HDSISCSIDriver, 'extend_volume')
- def test_create_clone_larger_size(self, m_extend_volume, m_id_to_vol):
-
- src_vol = self._create_volume()
- m_id_to_vol.return_value = src_vol
- src_vol['volume_size'] = src_vol['size']
-
- dst_vol = self._create_volume()
- dst_vol['size'] = 256
- dst_vol['volume_size'] = dst_vol['size']
-
- loc = self.driver.create_cloned_volume(dst_vol, src_vol)
- self.assertNotEqual(loc, None)
- m_extend_volume.assert_called_once_with(dst_vol, 256)
- # cleanup
- self.backend.deleteVolumebyProvider(src_vol['provider_location'])
- self.backend.deleteVolumebyProvider(loc['provider_location'])
-
- @mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol')
- def test_delete_snapshot(self, m_id_to_vol):
- svol = self._create_volume()
-
- lun = svol['provider_location']
- m_id_to_vol.return_value = svol
- self.driver.delete_snapshot(svol)
- self.assertIsNone(self.backend.getVolumebyProvider(lun))
-
- def test_create_volume_from_snapshot(self):
- svol = self._create_volume()
- svol['volume_size'] = svol['size']
- vol = self.driver.create_volume_from_snapshot(_VOLUME, svol)
- self.assertNotEqual(vol, None)
- # cleanup
- self.backend.deleteVolumebyProvider(svol['provider_location'])
- self.backend.deleteVolumebyProvider(vol['provider_location'])
-
- @mock.patch.object(time, 'sleep')
- @mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location')
- def test_initialize_connection(self, m_update_vol_location, m_sleep):
- connector = {}
- connector['initiator'] = 'iqn.1993-08.org.debian:01:11f90746eb2'
- connector['host'] = 'dut_1.lab.hds.com'
- vol = self._create_volume()
- conn = self.driver.initialize_connection(vol, connector)
- self.assertIn('3260', conn['data']['target_portal'])
- self.assertIs(type(conn['data']['target_lun']), int)
-
- self.backend.add_iscsi_conn = mock.MagicMock()
- self.backend.add_iscsi_conn.side_effect = putils.ProcessExecutionError
- self.assertRaises(exception.ISCSITargetAttachFailed,
- self.driver.initialize_connection, vol, connector)
-
- # cleanup
- self.backend.deleteVolumebyProvider(vol['provider_location'])
-
- @mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location')
- def test_terminate_connection(self, m_update_vol_location):
- connector = {}
- connector['initiator'] = 'iqn.1993-08.org.debian:01:11f90746eb2'
- connector['host'] = 'dut_1.lab.hds.com'
-
- vol = self._create_volume()
- vol['provider_location'] = "portal," +\
- connector['initiator'] +\
- ",18-48-A5-A1-80-13.0,ctl,port,hlun"
-
- conn = self.driver.initialize_connection(vol, connector)
- num_conn_before = self.backend.get_conns()
- self.driver.terminate_connection(vol, conn)
- num_conn_after = self.backend.get_conns()
- self.assertNotEqual(num_conn_before, num_conn_after)
- # cleanup
- self.backend.deleteVolumebyProvider(vol['provider_location'])
-
- @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
- return_value={'key': 'type', 'service_label': 'silver'})
- def test_get_pool(self, m_ext_spec):
- label = self.driver.get_pool(_VOLUME)
- self.assertEqual('silver', label)
-
- @mock.patch.object(time, 'sleep')
- @mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location')
- def test_get_service_target(self, m_update_vol_location, m_sleep):
-
- vol = _VOLUME.copy()
- self.backend.check_lu = mock.MagicMock()
- self.backend.check_target = mock.MagicMock()
-
- # Test the case where volume is not already mapped - CHAP enabled
- self.backend.check_lu.return_value = (False, 0, None)
- self.backend.check_target.return_value = (False, None)
- ret = self.driver._get_service_target(vol)
- iscsi_ip, iscsi_port, ctl, svc_port, hdp, alias, secret = ret
- self.assertEqual('evs1-tgt0', alias)
-
- # Test the case where volume is not already mapped - CHAP disabled
self.driver.config['chap_enabled'] = 'False'
- ret = self.driver._get_service_target(vol)
- iscsi_ip, iscsi_port, ctl, svc_port, hdp, alias, secret = ret
- self.assertEqual('evs1-tgt0', alias)
- # Test the case where all targets are full
- fake_tgt = {'alias': 'fake', 'luns': range(0, 32)}
- self.backend.check_lu.return_value = (False, 0, None)
- self.backend.check_target.return_value = (True, fake_tgt)
+ self.mock_object(HNASSSHBackend, 'get_evs',
+ mock.Mock(return_value='1'))
+ self.mock_object(HNASSSHBackend, 'check_lu',
+ mock.Mock(return_value=lu_info))
+ self.mock_object(HNASSSHBackend, 'check_target',
+ mock.Mock(return_value=tgt))
+ self.mock_object(HNASSSHBackend, 'get_target_iqn',
+ mock.Mock(return_value=iqn))
+ self.mock_object(HNASSSHBackend, 'create_target')
+
+ self.driver._get_service_target(self.volume)
+
+ def test_get_service_target_no_more_targets_exception(self):
+ iscsi.MAX_HNAS_LUS_PER_TARGET = 4
+ lu_info = {'mapped': False, 'id': 1,
+ 'tgt': {'alias': 'iscsi-test', 'secret': 'itEpgB5gPefGhW2'}}
+ tgt = {'found': True,
+ 'tgt': {
+ 'alias': 'cinder-default', 'secret': 'pxr6U37LZZJBoMc',
+ 'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
+ 'lus': [
+ {'id': '0', 'name': 'volume-0'},
+ {'id': '1', 'name': 'volume-1'},
+ {'id': '2', 'name': 'volume-2'},
+ {'id': '3', 'name': 'volume-3'}, ],
+ 'auth': 'Enabled'}}
+
+ self.mock_object(HNASSSHBackend, 'get_evs',
+ mock.Mock(return_value='1'))
+ self.mock_object(HNASSSHBackend, 'check_lu',
+ mock.Mock(return_value=lu_info))
+ self.mock_object(HNASSSHBackend, 'check_target',
+ mock.Mock(return_value=tgt))
+
self.assertRaises(exception.NoMoreTargets,
- self.driver._get_service_target, vol)
+ self.driver._get_service_target, self.volume)
- @mock.patch.object(iscsi.HDSISCSIDriver, '_get_service')
- def test_unmanage(self, get_service):
- get_service.return_value = ('fs2')
+ def test_check_pool_and_fs(self):
+ self.mock_object(hnas_utils, 'get_pool',
+ mock.Mock(return_value='default'))
+ self.driver._check_pool_and_fs(self.volume, 'fs2')
- self.driver.unmanage(_VOLUME)
- get_service.assert_called_once_with(_VOLUME)
-
- def test_manage_existing_get_size(self):
- vol = _VOLUME.copy()
- existing_vol_ref = {'source-name': 'manage_iscsi_test/volume-test'}
-
- out = self.driver.manage_existing_get_size(vol, existing_vol_ref)
- self.assertEqual(20, out)
-
- def test_manage_existing_get_size_error(self):
- vol = _VOLUME.copy()
- existing_vol_ref = {'source-name': 'invalid_FS/vol-not-found'}
-
- self.assertRaises(exception.ManageExistingInvalidReference,
- self.driver.manage_existing_get_size, vol,
- existing_vol_ref)
-
- def test_manage_existing_get_size_without_source_name(self):
- vol = _VOLUME.copy()
- existing_vol_ref = {
- 'source-id': 'bcc48c61-9691-4e5f-897c-793686093190'}
-
- self.assertRaises(exception.ManageExistingInvalidReference,
- self.driver.manage_existing_get_size, vol,
- existing_vol_ref)
-
- @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
- def test_manage_existing(self, m_get_extra_specs):
- vol = _VOLUME.copy()
- existing_vol_ref = {'source-name': 'fs2/volume-test'}
- version = {'provider_location': '18-48-A5-A1-80-13.testvol'}
-
- m_get_extra_specs.return_value = {'key': 'type',
- 'service_label': 'silver'}
-
- out = self.driver.manage_existing(vol, existing_vol_ref)
-
- m_get_extra_specs.assert_called_once_with('1')
- self.assertEqual(version, out)
-
- @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
- def test_manage_existing_invalid_pool(self, m_get_extra_specs):
- vol = _VOLUME.copy()
- existing_vol_ref = {'source-name': 'fs2/volume-test'}
-
- m_get_extra_specs.return_value = {'key': 'type',
- 'service_label': 'gold'}
+ def test_check_pool_and_fs_mismatch(self):
+ self.mock_object(hnas_utils, 'get_pool',
+ mock.Mock(return_value='default'))
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
- self.driver.manage_existing, vol, existing_vol_ref)
- m_get_extra_specs.assert_called_once_with('1')
+ self.driver._check_pool_and_fs, self.volume,
+ 'fs-cinder')
- def test_manage_existing_invalid_volume_name(self):
- vol = _VOLUME.copy()
- existing_vol_ref = {'source-name': 'fs2/t/est_volume'}
+ def test_check_pool_and_fs_host_mismatch(self):
+ self.mock_object(hnas_utils, 'get_pool',
+ mock.Mock(return_value='silver'))
+
+ self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
+ self.driver._check_pool_and_fs, self.volume,
+ 'fs3')
+
+ def test_do_setup(self):
+ evs_info = {'172.17.39.132': {'evs_number': 1},
+ '172.17.39.133': {'evs_number': 2},
+ '172.17.39.134': {'evs_number': 3}}
+
+ self.mock_object(HNASSSHBackend, 'get_fs_info',
+ mock.Mock(return_value=True))
+ self.mock_object(HNASSSHBackend, 'get_evs_info',
+ mock.Mock(return_value=evs_info))
+
+ self.driver.do_setup(None)
+
+ HNASSSHBackend.get_fs_info.assert_called_with('fs2')
+ self.assertTrue(HNASSSHBackend.get_evs_info.called)
+
+ def test_do_setup_portal_not_found(self):
+ evs_info = {'172.17.48.132': {'evs_number': 1},
+ '172.17.39.133': {'evs_number': 2},
+ '172.17.39.134': {'evs_number': 3}}
+
+ self.mock_object(HNASSSHBackend, 'get_fs_info',
+ mock.Mock(return_value=True))
+ self.mock_object(HNASSSHBackend, 'get_evs_info',
+ mock.Mock(return_value=evs_info))
+
+ self.assertRaises(exception.InvalidParameterValue,
+ self.driver.do_setup, None)
+
+ def test_do_setup_umounted_filesystem(self):
+ self.mock_object(HNASSSHBackend, 'get_fs_info',
+ mock.Mock(return_value=False))
+
+ self.assertRaises(exception.ParameterNotFound, self.driver.do_setup,
+ None)
+
+ def test_initialize_connection(self):
+ lu_info = {'mapped': True,
+ 'id': 1,
+ 'tgt': {'alias': 'iscsi-test',
+ 'secret': 'itEpgB5gPefGhW2'}}
+
+ conn = {'lun_name': 'cinder-lu',
+ 'initiator': 'initiator',
+ 'hdp': 'fs-cinder',
+ 'lu_id': '0',
+ 'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
+ 'port': 3260}
+
+ connector = {'initiator': 'fake_initiator'}
+
+ self.mock_object(HNASSSHBackend, 'get_evs',
+ mock.Mock(return_value=2))
+ self.mock_object(HNASSSHBackend, 'check_lu',
+ mock.Mock(return_value=lu_info))
+ self.mock_object(HNASSSHBackend, 'add_iscsi_conn',
+ mock.Mock(return_value=conn))
+
+ self.driver.initialize_connection(self.volume, connector)
+
+ HNASSSHBackend.add_iscsi_conn.assert_called_with(self.volume.name,
+ 'fs2', '22',
+ 'iscsi-test',
+ connector[
+ 'initiator'])
+
+ def test_initialize_connection_command_error(self):
+ lu_info = {'mapped': True,
+ 'id': 1,
+ 'tgt': {'alias': 'iscsi-test',
+ 'secret': 'itEpgB5gPefGhW2'}}
+
+ connector = {'initiator': 'fake_initiator'}
+
+ self.mock_object(HNASSSHBackend, 'get_evs',
+ mock.Mock(return_value=2))
+ self.mock_object(HNASSSHBackend, 'check_lu',
+ mock.Mock(return_value=lu_info))
+ self.mock_object(HNASSSHBackend, 'add_iscsi_conn',
+ mock.Mock(side_effect=putils.ProcessExecutionError))
+
+ self.assertRaises(exception.ISCSITargetAttachFailed,
+ self.driver.initialize_connection, self.volume,
+ connector)
+
+ def test_terminate_connection(self):
+ connector = {}
+ lu_info = {'mapped': True,
+ 'id': 1,
+ 'tgt': {'alias': 'iscsi-test',
+ 'secret': 'itEpgB5gPefGhW2'}}
+
+ self.mock_object(HNASSSHBackend, 'get_evs',
+ mock.Mock(return_value=2))
+ self.mock_object(HNASSSHBackend, 'check_lu',
+ mock.Mock(return_value=lu_info))
+ self.mock_object(HNASSSHBackend, 'del_iscsi_conn')
+
+ self.driver.terminate_connection(self.volume, connector)
+
+ HNASSSHBackend.del_iscsi_conn.assert_called_with('1',
+ 'iscsi-test',
+ lu_info['id'])
+
+ def test_get_volume_stats(self):
+ self.driver.pools = [{'pool_name': 'default',
+ 'service_label': 'svc_0',
+ 'fs': '172.17.39.132:/fs2'},
+ {'pool_name': 'silver',
+ 'service_label': 'svc_1',
+ 'fs': '172.17.39.133:/fs3'}]
+
+ fs_cinder = {
+ 'evs_id': '2',
+ 'total_size': '250',
+ 'label': 'fs-cinder',
+ 'available_size': '228',
+ 'used_size': '21.4',
+ 'id': '1025'
+ }
+
+ self.mock_object(HNASSSHBackend, 'get_fs_info',
+ mock.Mock(return_value=fs_cinder))
+
+ stats = self.driver.get_volume_stats(refresh=True)
+
+ self.assertEqual('5.0.0', stats['driver_version'])
+ self.assertEqual('Hitachi', stats['vendor_name'])
+ self.assertEqual('iSCSI', stats['storage_protocol'])
+
+ def test_create_volume(self):
+ version_info = {'mac': '83-68-96-AA-DA-5D'}
+ expected_out = {
+ 'provider_location': version_info['mac'] + '.' + self.volume.name
+ }
+
+ self.mock_object(HNASSSHBackend, 'create_lu')
+ self.mock_object(HNASSSHBackend, 'get_version',
+ mock.Mock(return_value=version_info))
+ out = self.driver.create_volume(self.volume)
+
+ self.assertEqual(expected_out, out)
+ HNASSSHBackend.create_lu.assert_called_with('fs2', u'128',
+ self.volume.name)
+
+ def test_create_volume_missing_fs(self):
+ self.volume.host = 'host1@hnas-iscsi-backend#missing'
+
+ self.assertRaises(exception.ParameterNotFound,
+ self.driver.create_volume, self.volume)
+
+ def test_delete_volume(self):
+ self.mock_object(HNASSSHBackend, 'delete_lu')
+
+ self.driver.delete_volume(self.volume)
+
+ HNASSSHBackend.delete_lu.assert_called_once_with(
+ self.parsed_xml['fs']['fs2'], self.volume.name)
+
+ def test_extend_volume(self):
+ new_size = 200
+ self.mock_object(HNASSSHBackend, 'extend_lu')
+
+ self.driver.extend_volume(self.volume, new_size)
+
+ HNASSSHBackend.extend_lu.assert_called_once_with(
+ self.parsed_xml['fs']['fs2'], new_size,
+ self.volume.name)
+
+ def test_create_cloned_volume(self):
+ clone_name = self.volume_clone.name
+ version_info = {'mac': '83-68-96-AA-DA-5D'}
+ expected_out = {
+ 'provider_location':
+ version_info['mac'] + '.' + self.volume_clone.name
+ }
+
+ self.mock_object(HNASSSHBackend, 'create_cloned_lu')
+ self.mock_object(HNASSSHBackend, 'get_version',
+ mock.Mock(return_value=version_info))
+ self.mock_object(HNASSSHBackend, 'extend_lu')
+
+ out = self.driver.create_cloned_volume(self.volume_clone, self.volume)
+ self.assertEqual(expected_out, out)
+ HNASSSHBackend.create_cloned_lu.assert_called_with(self.volume.name,
+ 'fs2',
+ clone_name)
+
+ def test_functions_with_pass(self):
+ self.driver.check_for_setup_error()
+ self.driver.ensure_export(None, self.volume)
+ self.driver.create_export(None, self.volume, 'connector')
+ self.driver.remove_export(None, self.volume)
+
+ def test_create_snapshot(self):
+ lu_info = {'lu_mounted': 'No',
+ 'name': 'cinder-lu',
+ 'fs_mounted': 'YES',
+ 'filesystem': 'FS-Cinder',
+ 'path': '/.cinder/cinder-lu.iscsi',
+ 'size': 2.0}
+ version_info = {'mac': '83-68-96-AA-DA-5D'}
+ expected_out = {
+ 'provider_location': version_info['mac'] + '.' + self.snapshot.name
+ }
+
+ self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
+ mock.Mock(return_value=lu_info))
+ self.mock_object(volume_types, 'get_volume_type',
+ mock.Mock(return_value=self.volume_type))
+ self.mock_object(HNASSSHBackend, 'create_cloned_lu')
+ self.mock_object(HNASSSHBackend, 'get_version',
+ mock.Mock(return_value=version_info))
+
+ out = self.driver.create_snapshot(self.snapshot)
+ self.assertEqual(expected_out, out)
+
+ def test_delete_snapshot(self):
+ lu_info = {'filesystem': 'FS-Cinder'}
+
+ self.mock_object(volume_types, 'get_volume_type',
+ mock.Mock(return_value=self.volume_type))
+ self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
+ mock.Mock(return_value=lu_info))
+ self.mock_object(HNASSSHBackend, 'delete_lu')
+
+ self.driver.delete_snapshot(self.snapshot)
+
+ def test_create_volume_from_snapshot(self):
+ version_info = {'mac': '83-68-96-AA-DA-5D'}
+ expected_out = {
+ 'provider_location': version_info['mac'] + '.' + self.snapshot.name
+ }
+
+ self.mock_object(HNASSSHBackend, 'create_cloned_lu')
+ self.mock_object(HNASSSHBackend, 'get_version',
+ mock.Mock(return_value=version_info))
+
+ out = self.driver.create_volume_from_snapshot(self.volume,
+ self.snapshot)
+ self.assertEqual(expected_out, out)
+ HNASSSHBackend.create_cloned_lu.assert_called_with(self.snapshot.name,
+ 'fs2',
+ self.volume.name)
+
+ def test_manage_existing_get_size(self):
+ existing_vol_ref = {'source-name': 'fs-cinder/volume-cinder'}
+ lu_info = {
+ 'name': 'volume-cinder',
+ 'comment': None,
+ 'path': ' /.cinder/volume-cinder',
+ 'size': 128,
+ 'filesystem': 'fs-cinder',
+ 'fs_mounted': 'Yes',
+ 'lu_mounted': 'Yes'
+ }
+
+ self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
+ mock.Mock(return_value=lu_info))
+
+ out = self.driver.manage_existing_get_size(self.volume,
+ existing_vol_ref)
+
+ self.assertEqual(lu_info['size'], out)
+ HNASSSHBackend.get_existing_lu_info.assert_called_with(
+ 'volume-cinder', lu_info['filesystem'])
+
+ def test_manage_existing_get_size_no_source_name(self):
+ existing_vol_ref = {}
self.assertRaises(exception.ManageExistingInvalidReference,
- self.driver.manage_existing, vol, existing_vol_ref)
+ self.driver.manage_existing_get_size, self.volume,
+ existing_vol_ref)
- def test_manage_existing_without_volume_name(self):
- vol = _VOLUME.copy()
- existing_vol_ref = {'source-name': 'fs2/'}
+ def test_manage_existing_get_size_wrong_source_name(self):
+ existing_vol_ref = {'source-name': 'fs-cinder/volume/cinder'}
+
+ self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
+ mock.Mock(return_value={}))
self.assertRaises(exception.ManageExistingInvalidReference,
- self.driver.manage_existing, vol, existing_vol_ref)
+ self.driver.manage_existing_get_size, self.volume,
+ existing_vol_ref)
- def test_manage_existing_with_FS_and_spaces(self):
- vol = _VOLUME.copy()
- existing_vol_ref = {'source-name': 'fs2/ '}
+ def test_manage_existing_get_size_volume_not_found(self):
+ existing_vol_ref = {'source-name': 'fs-cinder/volume-cinder'}
+
+ self.mock_object(HNASSSHBackend, 'get_existing_lu_info',
+ mock.Mock(return_value={}))
self.assertRaises(exception.ManageExistingInvalidReference,
- self.driver.manage_existing, vol, existing_vol_ref)
+ self.driver.manage_existing_get_size, self.volume,
+ existing_vol_ref)
+
+ def test_manage_existing(self):
+ self.volume.volume_type = self.volume_type
+ existing_vol_ref = {'source-name': 'fs2/volume-cinder'}
+ metadata = {'service_label': 'default'}
+ version_info = {'mac': '83-68-96-AA-DA-5D'}
+ expected_out = {
+ 'provider_location': version_info['mac'] + '.' + self.volume.name
+ }
+ self.mock_object(HNASSSHBackend, 'rename_existing_lu')
+ self.mock_object(volume_types, 'get_volume_type_extra_specs',
+ mock.Mock(return_value=metadata))
+ self.mock_object(HNASSSHBackend, 'get_version',
+ mock.Mock(return_value=version_info))
+
+ out = self.driver.manage_existing(self.volume, existing_vol_ref)
+
+ self.assertEqual(expected_out, out)
+ HNASSSHBackend.rename_existing_lu.assert_called_with('fs2',
+ 'volume-cinder',
+ self.volume.name)
+
+ def test_unmanage(self):
+ self.mock_object(HNASSSHBackend, 'rename_existing_lu')
+
+ self.driver.unmanage(self.volume)
+
+ HNASSSHBackend.rename_existing_lu.assert_called_with(
+ self.parsed_xml['fs']['fs2'],
+ self.volume.name, 'unmanage-' + self.volume.name)
diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_nfs.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_nfs.py
index c18a51fadb0..d4e75d3aa13 100644
--- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_nfs.py
+++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_nfs.py
@@ -14,550 +14,490 @@
# under the License.
#
-import os
-import tempfile
-
import mock
-import six
+import os
+from oslo_concurrency import processutils as putils
+import socket
+
+from cinder import context
from cinder import exception
+from cinder.image import image_utils
from cinder import test
+from cinder.tests.unit import fake_constants as fake
+from cinder.tests.unit import fake_snapshot
+from cinder.tests.unit import fake_volume
from cinder import utils
from cinder.volume import configuration as conf
+from cinder.volume.drivers.hitachi import hnas_backend as backend
from cinder.volume.drivers.hitachi import hnas_nfs as nfs
-from cinder.volume.drivers import nfs as drivernfs
-from cinder.volume.drivers import remotefs
-from cinder.volume import volume_types
+from cinder.volume.drivers.hitachi import hnas_utils
+from cinder.volume.drivers import nfs as base_nfs
-SHARESCONF = """172.17.39.132:/cinder
-172.17.39.133:/cinder"""
-
-HNASCONF = """
-
- ssc
- 172.17.44.15
- supervisor
- supervisor
-
- default
- 172.17.39.132:/cinder
-
-
- silver
- 172.17.39.133:/cinder
-
-
-"""
-
-HNAS_WRONG_CONF1 = """
-
- ssc
- 172.17.44.15
- supervisor
- supervisor
- default
- 172.17.39.132:/cinder
-
-
-"""
-
-HNAS_WRONG_CONF2 = """
-
- ssc
- 172.17.44.15
- supervisor
- supervisor
-
- default
-
-
- silver
-
-
-"""
-
-HNAS_WRONG_CONF3 = """
-
- ssc
- 172.17.44.15
-
- supervisor
-
- default
- 172.17.39.132:/cinder
-
-
- silver
- 172.17.39.133:/cinder
-
-
-"""
-
-HNAS_WRONG_CONF4 = """
-
- ssc
- 172.17.44.15
- super
- supervisor
-
- default
- 172.17.39.132:/cinder
-
-
- silver
- 172.17.39.133:/cinder
-
-
-"""
-
-HNAS_FULL_CONF = """
-
- ssc
- 172.17.44.15
- super
- supervisor
- True
- 2222
- True
- /etc/cinder/ssh_priv
- 10.0.0.1
-
- default
- 172.17.39.132:/cinder
-
-
- silver
- 172.17.39.133:/cinder/silver
-
-
- gold
- 172.17.39.133:/cinder/gold
-
-
- platinum
- 172.17.39.133:/cinder/platinum
-
-
-"""
-
-
-# The following information is passed on to tests, when creating a volume
-_SERVICE = ('Test_hdp', 'Test_path', 'Test_label')
-_SHARE = '172.17.39.132:/cinder'
-_SHARE2 = '172.17.39.133:/cinder'
-_EXPORT = '/cinder'
-_VOLUME = {'name': 'volume-bcc48c61-9691-4e5f-897c-793686093190',
- 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093190',
+_VOLUME = {'name': 'cinder-volume',
+ 'id': fake.VOLUME_ID,
'size': 128,
- 'volume_type': 'silver',
- 'volume_type_id': 'test',
- 'metadata': [{'key': 'type',
- 'service_label': 'silver'}],
- 'provider_location': None,
- 'id': 'bcc48c61-9691-4e5f-897c-793686093190',
- 'status': 'available',
- 'host': 'host1@hnas-iscsi-backend#silver'}
-_SNAPVOLUME = {'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc',
- 'id': '51dd4-8d8a-4aa9-9176-086c9d89e7fc',
- 'size': 128,
- 'volume_type': None,
- 'provider_location': None,
- 'volume_size': 128,
- 'volume_name': 'volume-bcc48c61-9691-4e5f-897c-793686093190',
- 'volume_id': 'bcc48c61-9691-4e5f-897c-793686093191',
- 'host': 'host1@hnas-iscsi-backend#silver'}
+ 'host': 'host1@hnas-nfs-backend#default',
+ 'volume_type': 'default',
+ 'provider_location': 'hnas'}
-_VOLUME_NFS = {'name': 'volume-61da3-8d23-4bb9-3136-ca819d89e7fc',
- 'id': '61da3-8d23-4bb9-3136-ca819d89e7fc',
- 'size': 4,
- 'metadata': [{'key': 'type',
- 'service_label': 'silver'}],
- 'volume_type': 'silver',
- 'volume_type_id': 'silver',
- 'provider_location': '172.24.44.34:/silver/',
- 'volume_size': 128,
- 'host': 'host1@hnas-nfs#silver'}
-
-GET_ID_VOL = {
- ("bcc48c61-9691-4e5f-897c-793686093190"): [_VOLUME],
- ("bcc48c61-9691-4e5f-897c-793686093191"): [_SNAPVOLUME]
+_SNAPSHOT = {
+ 'name': 'snapshot-51dd4-8d8a-4aa9-9176-086c9d89e7fc',
+ 'id': fake.SNAPSHOT_ID,
+ 'size': 128,
+ 'volume_type': None,
+ 'provider_location': None,
+ 'volume_size': 128,
+ 'volume': _VOLUME,
+ 'volume_name': _VOLUME['name'],
+ 'host': 'host1@hnas-iscsi-backend#silver',
+ 'volume_type_id': fake.VOLUME_TYPE_ID,
}
-def id_to_vol(arg):
- return GET_ID_VOL.get(arg)
-
-
-class SimulatedHnasBackend(object):
- """Simulation Back end. Talks to HNAS."""
-
- # these attributes are shared across object instances
- start_lun = 0
-
- def __init__(self):
- self.type = 'HNAS'
- self.out = ''
-
- def file_clone(self, cmd, ip0, user, pw, fslabel, source_path,
- target_path):
- return ""
-
- def get_version(self, ver, cmd, ip0, user, pw):
- self.out = "Array_ID: 18-48-A5-A1-80-13 (3080-G2) " \
- "version: 11.2.3319.09 LU: 256 " \
- "RG: 0 RG_LU: 0 Utility_version: 11.1.3225.01"
- return self.out
-
- def get_hdp_info(self, ip0, user, pw):
- self.out = "HDP: 1024 272384 MB 33792 MB 12 % LUs: 70 " \
- "Normal fs1\n" \
- "HDP: 1025 546816 MB 73728 MB 13 % LUs: 194 " \
- "Normal fs2"
- return self.out
-
- def get_nfs_info(self, cmd, ip0, user, pw):
- self.out = "Export: /cinder Path: /volumes HDP: fs1 FSID: 1024 " \
- "EVS: 1 IPS: 172.17.39.132\n" \
- "Export: /cinder Path: /volumes HDP: fs2 FSID: 1025 " \
- "EVS: 1 IPS: 172.17.39.133"
- return self.out
-
-
-class HDSNFSDriverTest(test.TestCase):
+class HNASNFSDriverTest(test.TestCase):
"""Test HNAS NFS volume driver."""
def __init__(self, *args, **kwargs):
- super(HDSNFSDriverTest, self).__init__(*args, **kwargs)
+ super(HNASNFSDriverTest, self).__init__(*args, **kwargs)
- @mock.patch.object(nfs, 'factory_bend')
- def setUp(self, m_factory_bend):
- super(HDSNFSDriverTest, self).setUp()
+ def instantiate_snapshot(self, snap):
+ snap = snap.copy()
+ snap['volume'] = fake_volume.fake_volume_obj(
+ None, **snap['volume'])
+ snapshot = fake_snapshot.fake_snapshot_obj(
+ None, expected_attrs=['volume'], **snap)
+ return snapshot
- self.backend = SimulatedHnasBackend()
- m_factory_bend.return_value = self.backend
+ def setUp(self):
+ super(HNASNFSDriverTest, self).setUp()
+ self.context = context.get_admin_context()
- self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml')
- self.addCleanup(self.config_file.close)
- self.config_file.write(HNASCONF)
- self.config_file.flush()
+ self.volume = fake_volume.fake_volume_obj(
+ self.context,
+ **_VOLUME)
- self.shares_file = tempfile.NamedTemporaryFile("w+", suffix='.xml')
- self.addCleanup(self.shares_file.close)
- self.shares_file.write(SHARESCONF)
- self.shares_file.flush()
+ self.snapshot = self.instantiate_snapshot(_SNAPSHOT)
+
+ self.volume_type = fake_volume.fake_volume_type_obj(
+ None,
+ **{'name': 'silver'}
+ )
+ self.clone = fake_volume.fake_volume_obj(
+ None,
+ **{'id': fake.VOLUME2_ID,
+ 'size': 128,
+ 'host': 'host1@hnas-nfs-backend#default',
+ 'volume_type': 'default',
+ 'provider_location': 'hnas'})
+
+ # xml parsed from utils
+ self.parsed_xml = {
+ 'username': 'supervisor',
+ 'password': 'supervisor',
+ 'hnas_cmd': 'ssc',
+ 'ssh_port': '22',
+ 'services': {
+ 'default': {
+ 'hdp': '172.24.49.21:/fs-cinder',
+ 'volume_type': 'default',
+ 'label': 'svc_0',
+ 'ctl': '1',
+ 'export': {
+ 'fs': 'fs-cinder',
+ 'path': '/export-cinder/volume'
+ }
+ },
+ },
+ 'cluster_admin_ip0': None,
+ 'ssh_private_key': None,
+ 'chap_enabled': 'True',
+ 'mgmt_ip0': '172.17.44.15',
+ 'ssh_enabled': None
+ }
+
+ self.configuration = mock.Mock(spec=conf.Configuration)
+ self.configuration.hds_hnas_nfs_config_file = 'fake.xml'
+
+ self.mock_object(hnas_utils, 'read_config',
+ mock.Mock(return_value=self.parsed_xml))
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.max_over_subscription_ratio = 20.0
self.configuration.reserved_percentage = 0
- self.configuration.hds_hnas_nfs_config_file = self.config_file.name
- self.configuration.nfs_shares_config = self.shares_file.name
- self.configuration.nfs_mount_point_base = '/opt/stack/cinder/mnt'
- self.configuration.nfs_mount_options = None
- self.configuration.nas_host = None
- self.configuration.nas_share_path = None
- self.configuration.nas_mount_options = None
+ self.configuration.hds_hnas_nfs_config_file = 'fake_config.xml'
+ self.configuration.nfs_shares_config = 'fake_nfs_share.xml'
+ self.configuration.num_shell_tries = 2
- self.driver = nfs.HDSNFSDriver(configuration=self.configuration)
- self.driver.do_setup("")
+ self.driver = nfs.HNASNFSDriver(configuration=self.configuration)
- @mock.patch('six.moves.builtins.open')
- @mock.patch.object(os, 'access')
- def test_read_config(self, m_access, m_open):
- # Test exception when file is not found
- m_access.return_value = False
- m_open.return_value = six.StringIO(HNASCONF)
- self.assertRaises(exception.NotFound, nfs._read_config, '')
+ def test_check_pool_and_share_mismatch_exception(self):
+ # passing a share that does not exists in config should raise an
+ # exception
+ nfs_shares = '172.24.49.21:/nfs_share'
- # Test exception when config file has parsing errors
- # due to missing tag
- m_access.return_value = True
- m_open.return_value = six.StringIO(HNAS_WRONG_CONF1)
- self.assertRaises(exception.ConfigNotFound, nfs._read_config, '')
-
- # Test exception when config file has parsing errors
- # due to missing tag
- m_open.return_value = six.StringIO(HNAS_WRONG_CONF2)
- self.configuration.hds_hnas_iscsi_config_file = ''
- self.assertRaises(exception.ParameterNotFound, nfs._read_config, '')
-
- # Test exception when config file has parsing errors
- # due to blank tag
- m_open.return_value = six.StringIO(HNAS_WRONG_CONF3)
- self.configuration.hds_hnas_iscsi_config_file = ''
- self.assertRaises(exception.ParameterNotFound, nfs._read_config, '')
-
- # Test when config file has parsing errors due invalid svc_number
- m_open.return_value = six.StringIO(HNAS_WRONG_CONF4)
- self.configuration.hds_hnas_iscsi_config_file = ''
- config = nfs._read_config('')
- self.assertEqual(1, len(config['services']))
-
- # Test config with full options
- # due invalid svc_number
- m_open.return_value = six.StringIO(HNAS_FULL_CONF)
- self.configuration.hds_hnas_iscsi_config_file = ''
- config = nfs._read_config('')
- self.assertEqual(4, len(config['services']))
-
- @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol')
- @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
- @mock.patch.object(nfs.HDSNFSDriver, '_get_export_path')
- @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
- def test_create_snapshot(self, m_get_volume_location, m_get_export_path,
- m_get_provider_location, m_id_to_vol):
- svol = _SNAPVOLUME.copy()
- m_id_to_vol.return_value = svol
-
- m_get_provider_location.return_value = _SHARE
- m_get_volume_location.return_value = _SHARE
- m_get_export_path.return_value = _EXPORT
-
- loc = self.driver.create_snapshot(svol)
- out = "{'provider_location': \'" + _SHARE + "'}"
- self.assertEqual(out, str(loc))
-
- @mock.patch.object(nfs.HDSNFSDriver, '_get_service')
- @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
- @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
- @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
- def test_create_cloned_volume(self, m_get_volume_location,
- m_get_provider_location, m_id_to_vol,
- m_get_service):
- vol = _VOLUME.copy()
- svol = _SNAPVOLUME.copy()
-
- m_get_service.return_value = _SERVICE
- m_get_provider_location.return_value = _SHARE
- m_get_volume_location.return_value = _SHARE
-
- loc = self.driver.create_cloned_volume(vol, svol)
-
- out = "{'provider_location': \'" + _SHARE + "'}"
- self.assertEqual(out, str(loc))
-
- @mock.patch.object(nfs.HDSNFSDriver, '_get_service')
- @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
- @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
- @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
- @mock.patch.object(nfs.HDSNFSDriver, 'extend_volume')
- def test_create_cloned_volume_larger(self, m_extend_volume,
- m_get_volume_location,
- m_get_provider_location,
- m_id_to_vol, m_get_service):
- vol = _VOLUME.copy()
- svol = _SNAPVOLUME.copy()
-
- m_get_service.return_value = _SERVICE
- m_get_provider_location.return_value = _SHARE
- m_get_volume_location.return_value = _SHARE
-
- svol['size'] = 256
-
- loc = self.driver.create_cloned_volume(svol, vol)
-
- out = "{'provider_location': \'" + _SHARE + "'}"
- self.assertEqual(out, str(loc))
- m_extend_volume.assert_called_once_with(svol, svol['size'])
-
- @mock.patch.object(nfs.HDSNFSDriver, '_ensure_shares_mounted')
- @mock.patch.object(nfs.HDSNFSDriver, '_do_create_volume')
- @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
- @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
- @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
- def test_create_volume(self, m_get_volume_location,
- m_get_provider_location, m_id_to_vol,
- m_do_create_volume, m_ensure_shares_mounted):
-
- vol = _VOLUME.copy()
-
- m_get_provider_location.return_value = _SHARE2
- m_get_volume_location.return_value = _SHARE2
-
- loc = self.driver.create_volume(vol)
-
- out = "{'provider_location': \'" + _SHARE2 + "'}"
- self.assertEqual(str(loc), out)
-
- @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol')
- @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
- @mock.patch.object(nfs.HDSNFSDriver, '_volume_not_present')
- def test_delete_snapshot(self, m_volume_not_present,
- m_get_provider_location, m_id_to_vol):
- svol = _SNAPVOLUME.copy()
-
- m_id_to_vol.return_value = svol
- m_get_provider_location.return_value = _SHARE
-
- m_volume_not_present.return_value = True
-
- self.driver.delete_snapshot(svol)
- self.assertIsNone(svol['provider_location'])
-
- @mock.patch.object(nfs.HDSNFSDriver, '_get_service')
- @mock.patch.object(nfs.HDSNFSDriver, '_id_to_vol', side_effect=id_to_vol)
- @mock.patch.object(nfs.HDSNFSDriver, '_get_provider_location')
- @mock.patch.object(nfs.HDSNFSDriver, '_get_export_path')
- @mock.patch.object(nfs.HDSNFSDriver, '_get_volume_location')
- def test_create_volume_from_snapshot(self, m_get_volume_location,
- m_get_export_path,
- m_get_provider_location, m_id_to_vol,
- m_get_service):
- vol = _VOLUME.copy()
- svol = _SNAPVOLUME.copy()
-
- m_get_service.return_value = _SERVICE
- m_get_provider_location.return_value = _SHARE
- m_get_export_path.return_value = _EXPORT
- m_get_volume_location.return_value = _SHARE
-
- loc = self.driver.create_volume_from_snapshot(vol, svol)
- out = "{'provider_location': \'" + _SHARE + "'}"
- self.assertEqual(out, str(loc))
-
- @mock.patch.object(volume_types, 'get_volume_type_extra_specs',
- return_value={'key': 'type', 'service_label': 'silver'})
- def test_get_pool(self, m_ext_spec):
- vol = _VOLUME.copy()
-
- self.assertEqual('silver', self.driver.get_pool(vol))
-
- @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
- @mock.patch.object(os.path, 'isfile', return_value=True)
- @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
- return_value='/mnt/gold')
- @mock.patch.object(utils, 'resolve_hostname', return_value='172.24.44.34')
- @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
- def test_manage_existing(self, m_ensure_shares, m_resolve, m_mount_point,
- m_isfile, m_get_extra_specs):
- vol = _VOLUME_NFS.copy()
-
- m_get_extra_specs.return_value = {'key': 'type',
- 'service_label': 'silver'}
- self.driver._mounted_shares = ['172.17.39.133:/cinder']
- existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}
-
- with mock.patch.object(self.driver, '_execute'):
- out = self.driver.manage_existing(vol, existing_vol_ref)
-
- loc = {'provider_location': '172.17.39.133:/cinder'}
- self.assertEqual(loc, out)
-
- m_get_extra_specs.assert_called_once_with('silver')
- m_isfile.assert_called_once_with('/mnt/gold/volume-test')
- m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
- m_resolve.assert_called_with('172.17.39.133')
- m_ensure_shares.assert_called_once_with()
-
- @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
- @mock.patch.object(os.path, 'isfile', return_value=True)
- @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
- return_value='/mnt/gold')
- @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133')
- @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
- def test_manage_existing_move_fails(self, m_ensure_shares, m_resolve,
- m_mount_point, m_isfile,
- m_get_extra_specs):
- vol = _VOLUME_NFS.copy()
-
- m_get_extra_specs.return_value = {'key': 'type',
- 'service_label': 'silver'}
- self.driver._mounted_shares = ['172.17.39.133:/cinder']
- existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}
- self.driver._execute = mock.Mock(side_effect=OSError)
-
- self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.manage_existing, vol, existing_vol_ref)
- m_get_extra_specs.assert_called_once_with('silver')
- m_isfile.assert_called_once_with('/mnt/gold/volume-test')
- m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
- m_resolve.assert_called_with('172.17.39.133')
- m_ensure_shares.assert_called_once_with()
-
- @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
- @mock.patch.object(os.path, 'isfile', return_value=True)
- @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
- return_value='/mnt/gold')
- @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133')
- @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
- def test_manage_existing_invalid_pool(self, m_ensure_shares, m_resolve,
- m_mount_point, m_isfile,
- m_get_extra_specs):
- vol = _VOLUME_NFS.copy()
- m_get_extra_specs.return_value = {'key': 'type',
- 'service_label': 'gold'}
- self.driver._mounted_shares = ['172.17.39.133:/cinder']
- existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}
- self.driver._execute = mock.Mock(side_effect=OSError)
+ self.mock_object(hnas_utils, 'get_pool',
+ mock.Mock(return_value='default'))
self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
- self.driver.manage_existing, vol, existing_vol_ref)
- m_get_extra_specs.assert_called_once_with('silver')
- m_isfile.assert_called_once_with('/mnt/gold/volume-test')
- m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
- m_resolve.assert_called_with('172.17.39.133')
- m_ensure_shares.assert_called_once_with()
+ self.driver._check_pool_and_share, self.volume,
+ nfs_shares)
- @mock.patch.object(utils, 'get_file_size', return_value=4000000000)
- @mock.patch.object(os.path, 'isfile', return_value=True)
- @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
- return_value='/mnt/gold')
- @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133')
- @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
- def test_manage_existing_get_size(self, m_ensure_shares, m_resolve,
- m_mount_point,
- m_isfile, m_file_size):
+ def test_check_pool_and_share_type_mismatch_exception(self):
+ nfs_shares = '172.24.49.21:/fs-cinder'
+ self.volume.host = 'host1@hnas-nfs-backend#gold'
- vol = _VOLUME_NFS.copy()
+        # get_pool returns 'default', which differs from the host's pool
+        # ('gold'), so a type mismatch exception is expected
+ self.mock_object(hnas_utils, 'get_pool',
+ mock.Mock(return_value='default'))
- self.driver._mounted_shares = ['172.17.39.133:/cinder']
- existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}
+ self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
+ self.driver._check_pool_and_share, self.volume,
+ nfs_shares)
- out = self.driver.manage_existing_get_size(vol, existing_vol_ref)
+ def test_do_setup(self):
+ version_info = {
+ 'mac': '83-68-96-AA-DA-5D',
+ 'model': 'HNAS 4040',
+ 'version': '12.4.3924.11',
+ 'hardware': 'NAS Platform',
+ 'serial': 'B1339109',
+ }
+ export_list = [
+ {'fs': 'fs-cinder',
+ 'name': '/fs-cinder',
+ 'free': 228.0,
+ 'path': '/fs-cinder',
+ 'evs': ['172.24.49.21'],
+ 'size': 250.0}
+ ]
- self.assertEqual(vol['size'], out)
- m_file_size.assert_called_once_with('/mnt/gold/volume-test')
- m_isfile.assert_called_once_with('/mnt/gold/volume-test')
- m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
- m_resolve.assert_called_with('172.17.39.133')
- m_ensure_shares.assert_called_once_with()
+ showmount = "Export list for 172.24.49.21: \n\
+/fs-cinder * \n\
+/shares/9bcf0bcc-8cc8-437e38bcbda9 127.0.0.1,10.1.0.5,172.24.44.141 \n\
+"
- @mock.patch.object(utils, 'get_file_size', return_value='badfloat')
- @mock.patch.object(os.path, 'isfile', return_value=True)
- @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
- return_value='/mnt/gold')
- @mock.patch.object(utils, 'resolve_hostname', return_value='172.17.39.133')
- @mock.patch.object(remotefs.RemoteFSDriver, '_ensure_shares_mounted')
- def test_manage_existing_get_size_error(self, m_ensure_shares, m_resolve,
- m_mount_point,
- m_isfile, m_file_size):
- vol = _VOLUME_NFS.copy()
+ self.mock_object(backend.HNASSSHBackend, 'get_version',
+ mock.Mock(return_value=version_info))
+ self.mock_object(self.driver, '_load_shares_config')
+ self.mock_object(backend.HNASSSHBackend, 'get_export_list',
+ mock.Mock(return_value=export_list))
+ self.mock_object(self.driver, '_execute',
+ mock.Mock(return_value=(showmount, '')))
- self.driver._mounted_shares = ['172.17.39.133:/cinder']
- existing_vol_ref = {'source-name': '172.17.39.133:/cinder/volume-test'}
+ self.driver.do_setup(None)
+
+ self.driver._execute.assert_called_with('showmount', '-e',
+ '172.24.49.21')
+ self.assertTrue(backend.HNASSSHBackend.get_export_list.called)
+
+ def test_do_setup_execute_exception(self):
+ version_info = {
+ 'mac': '83-68-96-AA-DA-5D',
+ 'model': 'HNAS 4040',
+ 'version': '12.4.3924.11',
+ 'hardware': 'NAS Platform',
+ 'serial': 'B1339109',
+ }
+
+ export_list = [
+ {'fs': 'fs-cinder',
+ 'name': '/fs-cinder',
+ 'free': 228.0,
+ 'path': '/fs-cinder',
+ 'evs': ['172.24.49.21'],
+ 'size': 250.0}
+ ]
+
+ self.mock_object(backend.HNASSSHBackend, 'get_version',
+ mock.Mock(return_value=version_info))
+ self.mock_object(self.driver, '_load_shares_config')
+ self.mock_object(backend.HNASSSHBackend, 'get_export_list',
+ mock.Mock(return_value=export_list))
+ self.mock_object(self.driver, '_execute',
+ mock.Mock(side_effect=putils.ProcessExecutionError))
+
+ self.assertRaises(putils.ProcessExecutionError, self.driver.do_setup,
+ None)
+
+ def test_do_setup_missing_export(self):
+ version_info = {
+ 'mac': '83-68-96-AA-DA-5D',
+ 'model': 'HNAS 4040',
+ 'version': '12.4.3924.11',
+ 'hardware': 'NAS Platform',
+ 'serial': 'B1339109',
+ }
+ export_list = [
+ {'fs': 'fs-cinder',
+ 'name': '/wrong-fs',
+ 'free': 228.0,
+ 'path': '/fs-cinder',
+ 'evs': ['172.24.49.21'],
+ 'size': 250.0}
+ ]
+
+ showmount = "Export list for 172.24.49.21: \n\
+/fs-cinder * \n\
+"
+
+ self.mock_object(backend.HNASSSHBackend, 'get_version',
+ mock.Mock(return_value=version_info))
+ self.mock_object(self.driver, '_load_shares_config')
+ self.mock_object(backend.HNASSSHBackend, 'get_export_list',
+ mock.Mock(return_value=export_list))
+ self.mock_object(self.driver, '_execute',
+ mock.Mock(return_value=(showmount, '')))
+
+ self.assertRaises(exception.InvalidParameterValue,
+ self.driver.do_setup, None)
+
+ def test_create_volume(self):
+ self.mock_object(self.driver, '_ensure_shares_mounted')
+ self.mock_object(self.driver, '_do_create_volume')
+
+ out = self.driver.create_volume(self.volume)
+
+ self.assertEqual('172.24.49.21:/fs-cinder', out['provider_location'])
+ self.assertTrue(self.driver._ensure_shares_mounted.called)
+
+ def test_create_volume_exception(self):
+        # pool 'original' does not exist in the configured services
+ self.volume.host = 'host1@hnas-nfs-backend#original'
+
+ self.mock_object(self.driver, '_ensure_shares_mounted')
+
+ self.assertRaises(exception.ParameterNotFound,
+ self.driver.create_volume, self.volume)
+
+ def test_create_cloned_volume(self):
+ self.volume.size = 150
+
+ self.mock_object(self.driver, 'extend_volume')
+ self.mock_object(backend.HNASSSHBackend, 'file_clone')
+
+ out = self.driver.create_cloned_volume(self.volume, self.clone)
+
+ self.assertEqual('hnas', out['provider_location'])
+
+ def test_get_volume_stats(self):
+ self.driver.pools = [{'pool_name': 'default',
+ 'service_label': 'default',
+ 'fs': '172.24.49.21:/easy-stack'},
+ {'pool_name': 'cinder_svc',
+ 'service_label': 'cinder_svc',
+ 'fs': '172.24.49.26:/MNT-CinderTest2'}]
+
+ self.mock_object(self.driver, '_update_volume_stats')
+ self.mock_object(self.driver, '_get_capacity_info',
+ mock.Mock(return_value=(150, 50, 100)))
+
+ out = self.driver.get_volume_stats()
+
+ self.assertEqual('5.0.0', out['driver_version'])
+ self.assertEqual('Hitachi', out['vendor_name'])
+ self.assertEqual('NFS', out['storage_protocol'])
+
+ def test_create_volume_from_snapshot(self):
+ self.mock_object(backend.HNASSSHBackend, 'file_clone')
+
+ self.driver.create_volume_from_snapshot(self.volume, self.snapshot)
+
+ def test_create_snapshot(self):
+ self.mock_object(backend.HNASSSHBackend, 'file_clone')
+ self.driver.create_snapshot(self.snapshot)
+
+ def test_delete_snapshot(self):
+ self.mock_object(self.driver, '_execute')
+
+ self.driver.delete_snapshot(self.snapshot)
+
+ def test_delete_snapshot_execute_exception(self):
+ self.mock_object(self.driver, '_execute',
+ mock.Mock(side_effect=putils.ProcessExecutionError))
+
+ self.driver.delete_snapshot(self.snapshot)
+
+ def test_extend_volume(self):
+ share_mount_point = '/fs-cinder'
+ data = image_utils.imageutils.QemuImgInfo
+ data.virtual_size = 200 * 1024 ** 3
+
+ self.mock_object(self.driver, '_get_mount_point_for_share',
+ mock.Mock(return_value=share_mount_point))
+ self.mock_object(image_utils, 'qemu_img_info',
+ mock.Mock(return_value=data))
+
+ self.driver.extend_volume(self.volume, 200)
+
+ self.driver._get_mount_point_for_share.assert_called_with('hnas')
+
+ def test_extend_volume_resizing_exception(self):
+ share_mount_point = '/fs-cinder'
+ data = image_utils.imageutils.QemuImgInfo
+ data.virtual_size = 2048 ** 3
+
+ self.mock_object(self.driver, '_get_mount_point_for_share',
+ mock.Mock(return_value=share_mount_point))
+ self.mock_object(image_utils, 'qemu_img_info',
+ mock.Mock(return_value=data))
+
+ self.mock_object(image_utils, 'resize_image')
+
+ self.assertRaises(exception.InvalidResults,
+ self.driver.extend_volume, self.volume, 200)
+
+ def test_manage_existing(self):
+ self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
+ existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}
+
+ self.mock_object(os.path, 'isfile', mock.Mock(return_value=True))
+ self.mock_object(self.driver, '_get_mount_point_for_share',
+ mock.Mock(return_value='/fs-cinder/cinder-volume'))
+ self.mock_object(utils, 'resolve_hostname',
+ mock.Mock(return_value='172.24.49.21'))
+ self.mock_object(self.driver, '_ensure_shares_mounted')
+ self.mock_object(self.driver, '_execute')
+
+ out = self.driver.manage_existing(self.volume, existing_vol_ref)
+
+ loc = {'provider_location': '172.24.49.21:/fs-cinder'}
+ self.assertEqual(loc, out)
+
+ os.path.isfile.assert_called_once_with('/fs-cinder/cinder-volume/')
+ self.driver._get_mount_point_for_share.assert_called_once_with(
+ '172.24.49.21:/fs-cinder')
+ utils.resolve_hostname.assert_called_with('172.24.49.21')
+ self.driver._ensure_shares_mounted.assert_called_once_with()
+
+ def test_manage_existing_name_matches(self):
+ self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
+ existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}
+
+ self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref',
+ mock.Mock(return_value=('172.24.49.21:/fs-cinder',
+ '/mnt/silver',
+ self.volume.name)))
+
+ out = self.driver.manage_existing(self.volume, existing_vol_ref)
+
+ loc = {'provider_location': '172.24.49.21:/fs-cinder'}
+ self.assertEqual(loc, out)
+
+ def test_manage_existing_exception(self):
+ existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}
+
+ self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref',
+ mock.Mock(return_value=('172.24.49.21:/fs-cinder',
+ '/mnt/silver',
+ 'cinder-volume')))
+ self.mock_object(self.driver, '_execute',
+ mock.Mock(side_effect=putils.ProcessExecutionError))
self.assertRaises(exception.VolumeBackendAPIException,
- self.driver.manage_existing_get_size, vol,
+ self.driver.manage_existing, self.volume,
existing_vol_ref)
- m_file_size.assert_called_once_with('/mnt/gold/volume-test')
- m_isfile.assert_called_once_with('/mnt/gold/volume-test')
- m_mount_point.assert_called_once_with('172.17.39.133:/cinder')
- m_resolve.assert_called_with('172.17.39.133')
- m_ensure_shares.assert_called_once_with()
- def test_manage_existing_get_size_without_source_name(self):
- vol = _VOLUME.copy()
- existing_vol_ref = {
- 'source-id': 'bcc48c61-9691-4e5f-897c-793686093190'}
+ def test_manage_existing_missing_source_name(self):
+ # empty source-name should raise an exception
+ existing_vol_ref = {}
self.assertRaises(exception.ManageExistingInvalidReference,
- self.driver.manage_existing_get_size, vol,
+ self.driver.manage_existing, self.volume,
existing_vol_ref)
- @mock.patch.object(drivernfs.NfsDriver, '_get_mount_point_for_share',
- return_value='/mnt/gold')
- def test_unmanage(self, m_mount_point):
- with mock.patch.object(self.driver, '_execute'):
- vol = _VOLUME_NFS.copy()
- self.driver.unmanage(vol)
+ def test_manage_existing_missing_volume_in_backend(self):
+ self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
+ existing_vol_ref = {'source-name': '172.24.49.21:/fs-cinder'}
- m_mount_point.assert_called_once_with('172.24.44.34:/silver/')
+ self.mock_object(self.driver, '_ensure_shares_mounted')
+ self.mock_object(utils, 'resolve_hostname',
+ mock.Mock(side_effect=['172.24.49.21',
+ '172.24.49.22']))
+
+ self.assertRaises(exception.ManageExistingInvalidReference,
+ self.driver.manage_existing, self.volume,
+ existing_vol_ref)
+
+ def test_manage_existing_get_size(self):
+ existing_vol_ref = {
+ 'source-name': '172.24.49.21:/fs-cinder/cinder-volume',
+ }
+ self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
+ expected_size = 1
+
+ self.mock_object(self.driver, '_ensure_shares_mounted')
+ self.mock_object(utils, 'resolve_hostname',
+ mock.Mock(return_value='172.24.49.21'))
+ self.mock_object(base_nfs.NfsDriver, '_get_mount_point_for_share',
+ mock.Mock(return_value='/mnt/silver'))
+ self.mock_object(os.path, 'isfile',
+ mock.Mock(return_value=True))
+ self.mock_object(utils, 'get_file_size',
+ mock.Mock(return_value=expected_size))
+
+ out = self.driver.manage_existing_get_size(self.volume,
+ existing_vol_ref)
+
+ self.assertEqual(1, out)
+ utils.get_file_size.assert_called_once_with(
+ '/mnt/silver/cinder-volume')
+ utils.resolve_hostname.assert_called_with('172.24.49.21')
+
+ def test_manage_existing_get_size_exception(self):
+ existing_vol_ref = {
+ 'source-name': '172.24.49.21:/fs-cinder/cinder-volume',
+ }
+ self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
+
+ self.mock_object(self.driver, '_get_share_mount_and_vol_from_vol_ref',
+ mock.Mock(return_value=('172.24.49.21:/fs-cinder',
+ '/mnt/silver',
+ 'cinder-volume')))
+
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.driver.manage_existing_get_size, self.volume,
+ existing_vol_ref)
+
+ def test_manage_existing_get_size_resolving_hostname_exception(self):
+ existing_vol_ref = {
+ 'source-name': '172.24.49.21:/fs-cinder/cinder-volume',
+ }
+
+ self.driver._mounted_shares = ['172.24.49.21:/fs-cinder']
+
+ self.mock_object(self.driver, '_ensure_shares_mounted')
+ self.mock_object(utils, 'resolve_hostname',
+ mock.Mock(side_effect=socket.gaierror))
+
+ self.assertRaises(socket.gaierror,
+ self.driver.manage_existing_get_size, self.volume,
+ existing_vol_ref)
+
+ def test_unmanage(self):
+ path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'
+ vol_str = 'volume-' + self.volume.id
+ vol_path = os.path.join(path, vol_str)
+ new_path = os.path.join(path, 'unmanage-' + vol_str)
+
+ self.mock_object(self.driver, '_get_mount_point_for_share',
+ mock.Mock(return_value=path))
+ self.mock_object(self.driver, '_execute')
+
+ self.driver.unmanage(self.volume)
+
+ self.driver._execute.assert_called_with('mv', vol_path, new_path,
+ run_as_root=False,
+ check_exit_code=True)
+ self.driver._get_mount_point_for_share.assert_called_with(
+ self.volume.provider_location)
+
+ def test_unmanage_volume_exception(self):
+ path = '/opt/stack/cinder/mnt/826692dfaeaf039b1f4dcc1dacee2c2e'
+
+ self.mock_object(self.driver, '_get_mount_point_for_share',
+ mock.Mock(return_value=path))
+ self.mock_object(self.driver, '_execute',
+ mock.Mock(side_effect=ValueError))
+
+ self.driver.unmanage(self.volume)
diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_utils.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_utils.py
new file mode 100644
index 00000000000..e6ba81837db
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hnas_utils.py
@@ -0,0 +1,259 @@
+# Copyright (c) 2016 Hitachi Data Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import mock
+import os
+
+from oslo_config import cfg
+from xml.etree import ElementTree as ETree
+
+from cinder import context
+from cinder import exception
+from cinder import test
+from cinder.tests.unit import fake_constants
+from cinder.tests.unit import fake_volume
+from cinder.volume.drivers.hitachi import hnas_utils
+from cinder.volume import volume_types
+
+
+_VOLUME = {'name': 'cinder-volume',
+ 'id': fake_constants.VOLUME_ID,
+ 'size': 128,
+ 'host': 'host1@hnas-nfs-backend#default',
+ 'volume_type': 'default',
+ 'provider_location': 'hnas'}
+
+service_parameters = ['volume_type', 'hdp']
+optional_parameters = ['hnas_cmd', 'cluster_admin_ip0', 'iscsi_ip']
+
+config_from_cinder_conf = {
+ 'username': 'supervisor',
+ 'fs': {'silver': 'silver',
+ 'easy-stack': 'easy-stack'},
+ 'ssh_port': '22',
+ 'chap_enabled': None,
+ 'cluster_admin_ip0': None,
+ 'ssh_private_key': None,
+ 'mgmt_ip0': '172.24.44.15',
+ 'services': {
+ 'default': {
+ 'label': u'svc_0',
+ 'volume_type': 'default',
+ 'hdp': 'easy-stack'},
+ 'FS-CinderDev1': {
+ 'label': u'svc_1',
+ 'volume_type': 'FS-CinderDev1',
+ 'hdp': 'silver'}},
+ 'password': 'supervisor',
+ 'hnas_cmd': 'ssc'}
+
+valid_XML_str = '''
+<config>
+    <mgmt_ip0>172.24.44.15</mgmt_ip0>
+    <username>supervisor</username>
+    <password>supervisor</password>
+    <ssh_enabled>False</ssh_enabled>
+    <ssh_private_key>/home/ubuntu/.ssh/id_rsa</ssh_private_key>
+    <svc_0>
+        <volume_type>default</volume_type>
+        <iscsi_ip>172.24.49.21</iscsi_ip>
+        <hdp>easy-stack</hdp>
+    </svc_0>
+    <svc_1>
+        <volume_type>silver</volume_type>
+        <iscsi_ip>172.24.49.32</iscsi_ip>
+        <hdp>FS-CinderDev1</hdp>
+    </svc_1>
+</config>
+'''
+
+XML_no_authentication = '''
+<config>
+    <mgmt_ip0>172.24.44.15</mgmt_ip0>
+    <username>supervisor</username>
+    <ssh_enabled>False</ssh_enabled>
+</config>
+'''
+
+XML_empty_authentication_param = '''
+<config>
+    <mgmt_ip0>172.24.44.15</mgmt_ip0>
+    <username>supervisor</username>
+    <password></password>
+    <ssh_enabled>False</ssh_enabled>
+    <svc_0>
+        <volume_type>default</volume_type>
+        <iscsi_ip>172.24.49.21</iscsi_ip>
+        <hdp>easy-stack</hdp>
+    </svc_0>
+</config>
+'''
+
+# missing mgmt_ip0
+XML_without_mandatory_params = '''
+<config>
+    <username>supervisor</username>
+    <password>supervisor</password>
+    <ssh_enabled>False</ssh_enabled>
+    <svc_0>
+        <volume_type>default</volume_type>
+        <iscsi_ip>172.24.49.21</iscsi_ip>
+        <hdp>easy-stack</hdp>
+    </svc_0>
+</config>
+'''
+
+XML_no_services_configured = '''
+<config>
+    <mgmt_ip0>172.24.44.15</mgmt_ip0>
+    <username>supervisor</username>
+    <password>supervisor</password>
+    <ssh_enabled>False</ssh_enabled>
+    <ssh_private_key>/home/ubuntu/.ssh/id_rsa</ssh_private_key>
+</config>
+'''
+
+parsed_xml = {'username': 'supervisor', 'password': 'supervisor',
+ 'hnas_cmd': 'ssc', 'iscsi_ip': None, 'ssh_port': '22',
+ 'fs': {'easy-stack': 'easy-stack',
+ 'FS-CinderDev1': 'FS-CinderDev1'},
+ 'cluster_admin_ip0': None,
+ 'ssh_private_key': '/home/ubuntu/.ssh/id_rsa',
+ 'services': {
+ 'default': {'hdp': 'easy-stack', 'volume_type': 'default',
+ 'label': 'svc_0'},
+ 'silver': {'hdp': 'FS-CinderDev1', 'volume_type': 'silver',
+ 'label': 'svc_1'}},
+ 'mgmt_ip0': '172.24.44.15'}
+
+valid_XML_etree = ETree.XML(valid_XML_str)
+invalid_XML_etree_no_authentication = ETree.XML(XML_no_authentication)
+invalid_XML_etree_empty_parameter = ETree.XML(XML_empty_authentication_param)
+invalid_XML_etree_no_mandatory_params = ETree.XML(XML_without_mandatory_params)
+invalid_XML_etree_no_service = ETree.XML(XML_no_services_configured)
+
+CONF = cfg.CONF
+
+
+class HNASUtilsTest(test.TestCase):
+
+ def __init__(self, *args, **kwargs):
+ super(HNASUtilsTest, self).__init__(*args, **kwargs)
+
+ def setUp(self):
+ super(HNASUtilsTest, self).setUp()
+ self.context = context.get_admin_context()
+ self.volume = fake_volume.fake_volume_obj(self.context, **_VOLUME)
+ self.volume_type = (fake_volume.fake_volume_type_obj(None, **{
+ 'id': fake_constants.VOLUME_TYPE_ID, 'name': 'silver'}))
+
+ def test_read_config(self):
+
+ self.mock_object(os, 'access', mock.Mock(return_value=True))
+ self.mock_object(ETree, 'parse',
+ mock.Mock(return_value=ETree.ElementTree))
+ self.mock_object(ETree.ElementTree, 'getroot',
+ mock.Mock(return_value=valid_XML_etree))
+
+ xml_path = 'xml_file_found'
+ out = hnas_utils.read_config(xml_path,
+ service_parameters,
+ optional_parameters)
+
+ self.assertEqual(parsed_xml, out)
+
+ def test_read_config_parser_error(self):
+ xml_file = 'hnas_nfs.xml'
+ self.mock_object(os, 'access', mock.Mock(return_value=True))
+ self.mock_object(ETree, 'parse',
+ mock.Mock(side_effect=ETree.ParseError))
+
+ self.assertRaises(exception.ConfigNotFound, hnas_utils.read_config,
+ xml_file, service_parameters, optional_parameters)
+
+ def test_read_config_not_found(self):
+ self.mock_object(os, 'access', mock.Mock(return_value=False))
+
+ xml_path = 'xml_file_not_found'
+ self.assertRaises(exception.NotFound, hnas_utils.read_config,
+ xml_path, service_parameters, optional_parameters)
+
+ def test_read_config_without_services_configured(self):
+ xml_file = 'hnas_nfs.xml'
+
+ self.mock_object(os, 'access', mock.Mock(return_value=True))
+ self.mock_object(ETree, 'parse',
+ mock.Mock(return_value=ETree.ElementTree))
+ self.mock_object(ETree.ElementTree, 'getroot',
+ mock.Mock(return_value=invalid_XML_etree_no_service))
+
+ self.assertRaises(exception.ParameterNotFound, hnas_utils.read_config,
+ xml_file, service_parameters, optional_parameters)
+
+ def test_read_config_empty_authentication_parameter(self):
+ xml_file = 'hnas_nfs.xml'
+
+ self.mock_object(os, 'access', mock.Mock(return_value=True))
+ self.mock_object(ETree, 'parse',
+ mock.Mock(return_value=ETree.ElementTree))
+ self.mock_object(ETree.ElementTree, 'getroot',
+ mock.Mock(return_value=
+ invalid_XML_etree_empty_parameter))
+
+ self.assertRaises(exception.ParameterNotFound, hnas_utils.read_config,
+ xml_file, service_parameters, optional_parameters)
+
+ def test_read_config_mandatory_parameters_missing(self):
+ xml_file = 'hnas_nfs.xml'
+
+ self.mock_object(os, 'access', mock.Mock(return_value=True))
+ self.mock_object(ETree, 'parse',
+ mock.Mock(return_value=ETree.ElementTree))
+ self.mock_object(ETree.ElementTree, 'getroot',
+ mock.Mock(return_value=
+ invalid_XML_etree_no_mandatory_params))
+
+ self.assertRaises(exception.ParameterNotFound, hnas_utils.read_config,
+ xml_file, service_parameters, optional_parameters)
+
+ def test_read_config_XML_without_authentication_parameter(self):
+ xml_file = 'hnas_nfs.xml'
+
+ self.mock_object(os, 'access', mock.Mock(return_value=True))
+ self.mock_object(ETree, 'parse',
+ mock.Mock(return_value=ETree.ElementTree))
+ self.mock_object(ETree.ElementTree, 'getroot',
+ mock.Mock(return_value=
+ invalid_XML_etree_no_authentication))
+
+ self.assertRaises(exception.ConfigNotFound, hnas_utils.read_config,
+ xml_file, service_parameters, optional_parameters)
+
+ def test_get_pool_with_vol_type(self):
+ self.mock_object(volume_types, 'get_volume_type_extra_specs',
+ mock.Mock(return_value={'service_label': 'silver'}))
+
+ self.volume.volume_type_id = fake_constants.VOLUME_TYPE_ID
+ self.volume.volume_type = self.volume_type
+
+ out = hnas_utils.get_pool(parsed_xml, self.volume)
+
+ self.assertEqual('silver', out)
+
+ def test_get_pool_without_vol_type(self):
+ out = hnas_utils.get_pool(parsed_xml, self.volume)
+ self.assertEqual('default', out)
diff --git a/cinder/volume/drivers/hitachi/hnas_backend.py b/cinder/volume/drivers/hitachi/hnas_backend.py
index 36506aaf04f..a339297be66 100644
--- a/cinder/volume/drivers/hitachi/hnas_backend.py
+++ b/cinder/volume/drivers/hitachi/hnas_backend.py
@@ -18,14 +18,12 @@
Hitachi Unified Storage (HUS-HNAS) platform. Backend operations.
"""
-import re
-
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import units
import six
-from cinder.i18n import _, _LW, _LI, _LE
+from cinder.i18n import _, _LE
from cinder import exception
from cinder import ssh_utils
from cinder import utils
@@ -34,34 +32,53 @@ LOG = logging.getLogger("cinder.volume.driver")
HNAS_SSC_RETRIES = 5
-class HnasBackend(object):
- """Back end. Talks to HUS-HNAS."""
- def __init__(self, drv_configs):
- self.drv_configs = drv_configs
+class HNASSSHBackend(object):
+ def __init__(self, backend_opts):
+
+ self.mgmt_ip0 = backend_opts.get('mgmt_ip0')
+ self.hnas_cmd = backend_opts.get('hnas_cmd', 'ssc')
+ self.cluster_admin_ip0 = backend_opts.get('cluster_admin_ip0')
+ self.ssh_port = backend_opts.get('ssh_port', '22')
+ self.ssh_username = backend_opts.get('username')
+ self.ssh_pwd = backend_opts.get('password')
+ self.ssh_private_key = backend_opts.get('ssh_private_key')
+ self.storage_version = None
self.sshpool = None
+ self.fslist = {}
+ self.tgt_list = {}
@utils.retry(exceptions=exception.HNASConnError, retries=HNAS_SSC_RETRIES,
wait_random=True)
- def run_cmd(self, cmd, ip0, user, pw, *args, **kwargs):
- """Run a command on SMU or using SSH
+ def _run_cmd(self, *args, **kwargs):
+ """Runs a command on SMU using SSH.
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :returns: formated string with version information
+ :returns: stdout and stderr of the command
"""
- LOG.debug('Enable ssh: %s',
- six.text_type(self.drv_configs['ssh_enabled']))
+ if self.cluster_admin_ip0 is None:
+ # Connect to SMU through SSH and run ssc locally
+ args = (self.hnas_cmd, 'localhost') + args
+ else:
+ args = (self.hnas_cmd, '--smuauth', self.cluster_admin_ip0) + args
- if self.drv_configs['ssh_enabled'] != 'True':
- # Direct connection via ssc
- args = (cmd, '--user', user, '--password', pw, ip0) + args
+ utils.check_ssh_injection(args)
+ command = ' '.join(args)
+ command = command.replace('"', '\\"')
+ if not self.sshpool:
+ self.sshpool = ssh_utils.SSHPool(ip=self.mgmt_ip0,
+ port=int(self.ssh_port),
+ conn_timeout=None,
+ login=self.ssh_username,
+ password=self.ssh_pwd,
+ privatekey=self.ssh_private_key)
+
+ with self.sshpool.item() as ssh:
try:
- out, err = utils.execute(*args, **kwargs)
- LOG.debug("command %(cmd)s result: out = %(out)s - err = "
- "%(err)s", {'cmd': cmd, 'out': out, 'err': err})
+ out, err = putils.ssh_execute(ssh, command,
+ check_exit_code=True)
+ LOG.debug("command %(cmd)s result: out = "
+ "%(out)s - err = %(err)s",
+ {'cmd': self.hnas_cmd, 'out': out, 'err': err})
return out, err
except putils.ProcessExecutionError as e:
if 'Failed to establish SSC connection' in e.stderr:
@@ -74,687 +91,428 @@ class HnasBackend(object):
raise exception.HNASConnError(msg)
else:
raise
- else:
- if self.drv_configs['cluster_admin_ip0'] is None:
- # Connect to SMU through SSH and run ssc locally
- args = (cmd, 'localhost') + args
- else:
- args = (cmd, '--smuauth',
- self.drv_configs['cluster_admin_ip0']) + args
- utils.check_ssh_injection(args)
- command = ' '.join(args)
- command = command.replace('"', '\\"')
+ def get_version(self):
+ """Gets version information from the storage unit.
- if not self.sshpool:
- server = self.drv_configs['mgmt_ip0']
- port = int(self.drv_configs['ssh_port'])
- username = self.drv_configs['username']
- # We only accept private/public key auth
- password = ""
- privatekey = self.drv_configs['ssh_private_key']
- self.sshpool = ssh_utils.SSHPool(server,
- port,
- None,
- username,
- password=password,
- privatekey=privatekey)
-
- with self.sshpool.item() as ssh:
-
- try:
- out, err = putils.ssh_execute(ssh, command,
- check_exit_code=True)
- LOG.debug("command %(cmd)s result: out = "
- "%(out)s - err = %(err)s",
- {'cmd': cmd, 'out': out, 'err': err})
- return out, err
- except putils.ProcessExecutionError as e:
- if 'Failed to establish SSC connection' in e.stderr:
- LOG.debug("SSC connection error!")
- msg = _("Failed to establish SSC connection.")
- raise exception.HNASConnError(msg)
- else:
- raise putils.ProcessExecutionError
-
- def get_version(self, cmd, ver, ip0, user, pw):
- """Gets version information from the storage unit
-
- :param cmd: ssc command name
- :param ver: string driver version
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :returns: formatted string with version information
- """
- out, err = self.run_cmd(cmd, ip0, user, pw, "cluster-getmac",
- check_exit_code=True)
- hardware = out.split()[2]
-
- out, err = self.run_cmd(cmd, ip0, user, pw, "ver",
- check_exit_code=True)
- lines = out.split('\n')
-
- model = ""
- for line in lines:
- if 'Model:' in line:
- model = line.split()[1]
- if 'Software:' in line:
- ver = line.split()[1]
-
- # If not using SSH, the local utility version can be different from the
- # one used in HNAS
- if self.drv_configs['ssh_enabled'] != 'True':
- out, err = utils.execute(cmd, "-version", check_exit_code=True)
- util = out.split()[1]
-
- out = ("Array_ID: %(arr)s (%(mod)s) version: %(ver)s LU: 256 "
- "RG: 0 RG_LU: 0 Utility_version: %(util)s" %
- {'arr': hardware, 'mod': model, 'ver': ver, 'util': util})
- else:
- out = ("Array_ID: %(arr)s (%(mod)s) version: %(ver)s LU: 256 "
- "RG: 0 RG_LU: 0" %
- {'arr': hardware, 'mod': model, 'ver': ver})
-
- LOG.debug('get_version: %(out)s -- %(err)s', {'out': out, 'err': err})
- return out
-
- def get_iscsi_info(self, cmd, ip0, user, pw):
- """Gets IP addresses for EVSs, use EVSID as controller.
-
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :returns: formated string with iSCSI information
+ :returns: dictionary with HNAS information
+ storage_version={
+ 'mac': HNAS MAC ID,
+ 'model': HNAS model,
+ 'version': the software version,
+ 'hardware': the hardware version,
+ 'serial': HNAS serial number}
"""
+ if not self.storage_version:
+ version_info = {}
+ out, err = self._run_cmd("cluster-getmac")
+ mac = out.split(':')[1].strip()
+ version_info['mac'] = mac
- out, err = self.run_cmd(cmd, ip0, user, pw,
- 'evsipaddr', '-l',
- check_exit_code=True)
- lines = out.split('\n')
+ out, err = self._run_cmd("ver")
+ split_out = out.split('\n')
- newout = ""
- for line in lines:
+ model = split_out[1].split(':')[1].strip()
+ version = split_out[3].split()[1]
+ hardware = split_out[5].split(':')[1].strip()
+ serial = split_out[12].split()[2]
+
+ version_info['model'] = model
+ version_info['version'] = version
+ version_info['hardware'] = hardware
+ version_info['serial'] = serial
+
+ self.storage_version = version_info
+
+ return self.storage_version
+
+ def get_evs_info(self):
+ """Gets the IP addresses of all EVSs in HNAS.
+
+ :returns: dictionary with EVS information
+ evs_info={
+ : {evs_number: number identifying the EVS1 on HNAS},
+ : {evs_number: number identifying the EVS2 on HNAS},
+ ...
+ }
+ """
+ evs_info = {}
+ out, err = self._run_cmd("evsipaddr", "-l")
+
+ out = out.split('\n')
+ for line in out:
if 'evs' in line and 'admin' not in line:
- inf = line.split()
- (evsnum, ip) = (inf[1], inf[3])
- newout += "CTL: %s Port: 0 IP: %s Port: 3260 Link: Up\n" \
- % (evsnum, ip)
+ ip = line.split()[3].strip()
+ evs_info[ip] = {}
+ evs_info[ip]['evs_number'] = line.split()[1].strip()
- LOG.debug('get_iscsi_info: %(out)s -- %(err)s',
- {'out': out, 'err': err})
- return newout
+ return evs_info
- def get_hdp_info(self, cmd, ip0, user, pw, fslabel=None):
- """Gets the list of filesystems and fsids.
+ def get_fs_info(self, fs_label):
+ """Gets the information of a given FS.
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :param fslabel: filesystem label we want to get info
- :returns: formated string with filesystems and fsids
+ :param fs_label: Label of the filesystem
+ :returns: dictionary with FS information
+ fs_info={
+ 'id': a Logical Unit ID,
+ 'label': a Logical Unit name,
+ 'evs_id': the ID of the EVS in which the filesystem is created
+ (not present if there is a single EVS),
+ 'total_size': the total size of the FS (in GB),
+ 'used_size': the size that is already used (in GB),
+ 'available_size': the free space (in GB)
+ }
"""
+ def _convert_size(param):
+ size = float(param) * units.Mi
+ return six.text_type(size)
- if fslabel is None:
- out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-a',
- check_exit_code=True)
- else:
- out, err = self.run_cmd(cmd, ip0, user, pw, 'df', '-f', fslabel,
- check_exit_code=True)
-
- lines = out.split('\n')
+ fs_info = {}
single_evs = True
+ id, lbl, evs, t_sz, u_sz, a_sz = 0, 1, 2, 3, 5, 12
+ t_sz_unit, u_sz_unit, a_sz_unit = 4, 6, 13
- LOG.debug("Parsing output: %s", lines)
+ out, err = self._run_cmd("df", "-af", fs_label)
- newout = ""
- for line in lines:
- if 'Not mounted' in line or 'Not determined' in line:
- continue
- if 'not' not in line and 'EVS' in line:
- single_evs = False
- if 'GB' in line or 'TB' in line:
- LOG.debug("Parsing output: %s", line)
- inf = line.split()
+ invalid_outs = ['Not mounted', 'Not determined', 'not found']
- if not single_evs:
- (fsid, fslabel, capacity) = (inf[0], inf[1], inf[3])
- (used, perstr) = (inf[5], inf[7])
- (availunit, usedunit) = (inf[4], inf[6])
- else:
- (fsid, fslabel, capacity) = (inf[0], inf[1], inf[2])
- (used, perstr) = (inf[4], inf[6])
- (availunit, usedunit) = (inf[3], inf[5])
+ for problem in invalid_outs:
+ if problem in out:
+ return {}
- if usedunit == 'GB':
- usedmultiplier = units.Ki
- else:
- usedmultiplier = units.Mi
- if availunit == 'GB':
- availmultiplier = units.Ki
- else:
- availmultiplier = units.Mi
- m = re.match("\((\d+)\%\)", perstr)
- if m:
- percent = m.group(1)
- else:
- percent = 0
- newout += "HDP: %s %d MB %d MB %d %% LUs: 256 Normal %s\n" \
- % (fsid, int(float(capacity) * availmultiplier),
- int(float(used) * usedmultiplier),
- int(percent), fslabel)
+ if 'EVS' in out:
+ single_evs = False
- LOG.debug('get_hdp_info: %(out)s -- %(err)s',
- {'out': newout, 'err': err})
- return newout
+ fs_data = out.split('\n')[3].split()
- def get_evs(self, cmd, ip0, user, pw, fsid):
- """Gets the EVSID for the named filesystem.
+ # Getting only the desired values from the output. If there is a single
+ # EVS, its ID is not shown in the output and we have to decrease the
+ # indexes to get the right values.
+ fs_info['id'] = fs_data[id]
+ fs_info['label'] = fs_data[lbl]
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :returns: EVS id of the file system
+ if not single_evs:
+ fs_info['evs_id'] = fs_data[evs]
+
+ fs_info['total_size'] = (
+ (fs_data[t_sz]) if not single_evs else fs_data[t_sz - 1])
+ fs_info['used_size'] = (
+ fs_data[u_sz] if not single_evs else fs_data[u_sz - 1])
+ fs_info['available_size'] = (
+ fs_data[a_sz] if not single_evs else fs_data[a_sz - 1])
+
+ # Converting the sizes if necessary.
+ if not single_evs:
+ if fs_data[t_sz_unit] == 'TB':
+ fs_info['total_size'] = _convert_size(fs_info['total_size'])
+ if fs_data[u_sz_unit] == 'TB':
+ fs_info['used_size'] = _convert_size(fs_info['used_size'])
+ if fs_data[a_sz_unit] == 'TB':
+ fs_info['available_size'] = _convert_size(
+ fs_info['available_size'])
+ else:
+ if fs_data[t_sz_unit - 1] == 'TB':
+ fs_info['total_size'] = _convert_size(fs_info['total_size'])
+ if fs_data[u_sz_unit - 1] == 'TB':
+ fs_info['used_size'] = _convert_size(fs_info['used_size'])
+ if fs_data[a_sz_unit - 1] == 'TB':
+ fs_info['available_size'] = _convert_size(
+ fs_info['available_size'])
+
+ LOG.debug("File system info of %(fs)s (sizes in GB): %(info)s.",
+ {'fs': fs_label, 'info': fs_info})
+
+ return fs_info
+
+ def get_evs(self, fs_label):
+ """Gets the EVS ID for the named filesystem.
+
+ :param fs_label: The filesystem label related to the EVS required
+ :returns: EVS ID of the filesystem
"""
+ if not self.fslist:
+ self._get_fs_list()
- out, err = self.run_cmd(cmd, ip0, user, pw, "evsfs", "list",
- check_exit_code=True)
- LOG.debug('get_evs: out %s.', out)
+ # When the FS is found in the list of known FS, returns the EVS ID
+ for key in self.fslist:
+ if fs_label == self.fslist[key]['label']:
+ return self.fslist[key]['evsid']
- lines = out.split('\n')
- for line in lines:
- inf = line.split()
- if fsid in line and (fsid == inf[0] or fsid == inf[1]):
- return inf[3]
+ def _get_targets(self, evs_id, tgt_alias=None, refresh=False):
+ """Gets the target list of an EVS.
- LOG.warning(_LW('get_evs: %(out)s -- No find for %(fsid)s'),
- {'out': out, 'fsid': fsid})
- return 0
-
- def _get_evsips(self, cmd, ip0, user, pw, evsid):
- """Gets the EVS IPs for the named filesystem."""
-
- out, err = self.run_cmd(cmd, ip0, user, pw,
- 'evsipaddr', '-e', evsid,
- check_exit_code=True)
-
- iplist = ""
- lines = out.split('\n')
- for line in lines:
- inf = line.split()
- if 'evs' in line:
- iplist += inf[3] + ' '
-
- LOG.debug('get_evsips: %s', iplist)
- return iplist
-
- def _get_fsid(self, cmd, ip0, user, pw, fslabel):
- """Gets the FSID for the named filesystem."""
-
- out, err = self.run_cmd(cmd, ip0, user, pw, 'evsfs', 'list',
- check_exit_code=True)
- LOG.debug('get_fsid: out %s', out)
-
- lines = out.split('\n')
- for line in lines:
- inf = line.split()
- if fslabel in line and fslabel == inf[1]:
- LOG.debug('get_fsid: %s', line)
- return inf[0]
-
- LOG.warning(_LW('get_fsid: %(out)s -- No info for %(fslabel)s'),
- {'out': out, 'fslabel': fslabel})
- return 0
-
- def _get_targets(self, cmd, ip0, user, pw, evsid, tgtalias=None):
- """Get the target list of an EVS.
-
- Get the target list of an EVS. Optionally can return the target
- list of a specific target.
+ Gets the target list of an EVS. Optionally can return the information
+ of a specific target.
+        :returns: Target list, a specific target info dict, or empty list
"""
+ LOG.debug("Getting target list for evs %(evs)s, tgtalias: %(tgt)s.",
+ {'evs': evs_id, 'tgt': tgt_alias})
- LOG.debug("Getting target list for evs %s, tgtalias: %s.",
- evsid, tgtalias)
+ if (refresh or
+ evs_id not in self.tgt_list.keys() or
+ tgt_alias is not None):
+ self.tgt_list[evs_id] = []
+ out, err = self._run_cmd("console-context", "--evs", evs_id,
+ 'iscsi-target', 'list')
- try:
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", evsid, 'iscsi-target', 'list',
- check_exit_code=True)
- except putils.ProcessExecutionError as e:
- LOG.error(_LE('Error getting iSCSI target info '
- 'from EVS %(evs)s.'), {'evs': evsid})
- LOG.debug("_get_targets out: %(out)s, err: %(err)s.",
- {'out': e.stdout, 'err': e.stderr})
- return []
+ if 'No targets' in out:
+ LOG.debug("No targets found in EVS %(evsid)s.",
+ {'evsid': evs_id})
+ return self.tgt_list[evs_id]
- tgt_list = []
- if 'No targets' in out:
- LOG.debug("No targets found in EVS %(evsid)s.", {'evsid': evsid})
- return tgt_list
+ tgt_raw_list = out.split('Alias')[1:]
+ for tgt_raw_info in tgt_raw_list:
+ tgt = {}
+ tgt['alias'] = tgt_raw_info.split('\n')[0].split(' ').pop()
+ tgt['iqn'] = tgt_raw_info.split('\n')[1].split(' ').pop()
+ tgt['secret'] = tgt_raw_info.split('\n')[3].split(' ').pop()
+ tgt['auth'] = tgt_raw_info.split('\n')[4].split(' ').pop()
+ lus = []
+ tgt_raw_info = tgt_raw_info.split('\n\n')[1]
+ tgt_raw_list = tgt_raw_info.split('\n')[2:]
- tgt_raw_list = out.split('Alias')[1:]
- for tgt_raw_info in tgt_raw_list:
- tgt = {}
- tgt['alias'] = tgt_raw_info.split('\n')[0].split(' ').pop()
- tgt['iqn'] = tgt_raw_info.split('\n')[1].split(' ').pop()
- tgt['secret'] = tgt_raw_info.split('\n')[3].split(' ').pop()
- tgt['auth'] = tgt_raw_info.split('\n')[4].split(' ').pop()
- luns = []
- tgt_raw_info = tgt_raw_info.split('\n\n')[1]
- tgt_raw_list = tgt_raw_info.split('\n')[2:]
+ for lu_raw_line in tgt_raw_list:
+ lu_raw_line = lu_raw_line.strip()
+ lu_raw_line = lu_raw_line.split(' ')
+ lu = {}
+ lu['id'] = lu_raw_line[0]
+ lu['name'] = lu_raw_line.pop()
+ lus.append(lu)
- for lun_raw_line in tgt_raw_list:
- lun_raw_line = lun_raw_line.strip()
- lun_raw_line = lun_raw_line.split(' ')
- lun = {}
- lun['id'] = lun_raw_line[0]
- lun['name'] = lun_raw_line.pop()
- luns.append(lun)
+ tgt['lus'] = lus
- tgt['luns'] = luns
+ if tgt_alias == tgt['alias']:
+ return tgt
- if tgtalias == tgt['alias']:
- return [tgt]
+ self.tgt_list[evs_id].append(tgt)
- tgt_list.append(tgt)
-
- if tgtalias is not None:
- # We tried to find 'tgtalias' but didn't find. Return an empty
+ if tgt_alias is not None:
+            # We tried to find 'tgt_alias' but didn't find it. Return an empty
# list.
LOG.debug("There's no target %(alias)s in EVS %(evsid)s.",
- {'alias': tgtalias, 'evsid': evsid})
+ {'alias': tgt_alias, 'evsid': evs_id})
return []
LOG.debug("Targets in EVS %(evs)s: %(tgtl)s.",
- {'evs': evsid, 'tgtl': tgt_list})
- return tgt_list
+ {'evs': evs_id, 'tgtl': self.tgt_list[evs_id]})
- def _get_unused_lunid(self, cmd, ip0, user, pw, tgt_info):
+ return self.tgt_list[evs_id]
- if len(tgt_info['luns']) == 0:
+ def _get_unused_luid(self, tgt_info):
+ """Gets a free logical unit id number to be used.
+
+ :param tgt_info: dictionary with the target information
+ :returns: a free logical unit id number
+ """
+ if len(tgt_info['lus']) == 0:
return 0
- free_lun = 0
- for lun in tgt_info['luns']:
- if int(lun['id']) == free_lun:
- free_lun += 1
+ free_lu = 0
+ for lu in tgt_info['lus']:
+ if int(lu['id']) == free_lu:
+ free_lu += 1
- if int(lun['id']) > free_lun:
- # Found a free LUN number
+ if int(lu['id']) > free_lu:
+ # Found a free LU number
break
- return free_lun
+ LOG.debug("Found the free LU ID: %(lu)s.", {'lu': free_lu})
- def get_nfs_info(self, cmd, ip0, user, pw):
- """Gets information on each NFS export.
+ return free_lu
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :returns: formated string
- """
-
- out, err = self.run_cmd(cmd, ip0, user, pw,
- 'for-each-evs', '-q',
- 'nfs-export', 'list',
- check_exit_code=True)
-
- lines = out.split('\n')
- newout = ""
- export = ""
- path = ""
- for line in lines:
- inf = line.split()
- if 'Export name' in line:
- export = inf[2]
- if 'Export path' in line:
- path = inf[2]
- if 'File system info' in line:
- fs = ""
- if 'File system label' in line:
- fs = inf[3]
- if 'Transfer setting' in line and fs != "":
- fsid = self._get_fsid(cmd, ip0, user, pw, fs)
- evsid = self.get_evs(cmd, ip0, user, pw, fsid)
- ips = self._get_evsips(cmd, ip0, user, pw, evsid)
- newout += "Export: %s Path: %s HDP: %s FSID: %s \
- EVS: %s IPS: %s\n" \
- % (export, path, fs, fsid, evsid, ips)
- fs = ""
-
- LOG.debug('get_nfs_info: %(out)s -- %(err)s',
- {'out': newout, 'err': err})
- return newout
-
- def create_lu(self, cmd, ip0, user, pw, hdp, size, name):
+ def create_lu(self, fs_label, size, lu_name):
"""Creates a new Logical Unit.
If the operation can not be performed for some reason, utils.execute()
throws an error and aborts the operation. Used for iSCSI only
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :param hdp: data Pool the logical unit will be created
- :param size: Size (Mb) of the new logical unit
- :param name: name of the logical unit
- :returns: formated string with 'LUN %d HDP: %d size: %s MB, is
- successfully created'
+    :param fs_label: data pool where the Logical Unit will be created
+ :param size: Size (GB) of the new Logical Unit
+ :param lu_name: name of the Logical Unit
"""
+ evs_id = self.get_evs(fs_label)
- _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", _evsid,
- 'iscsi-lu', 'add', "-e",
- name, hdp,
- '/.cinder/' + name + '.iscsi',
- size + 'M',
- check_exit_code=True)
+ self._run_cmd("console-context", "--evs", evs_id, 'iscsi-lu', 'add',
+ "-e", lu_name, fs_label, '/.cinder/' + lu_name +
+ '.iscsi', size + 'G')
- out = "LUN %s HDP: %s size: %s MB, is successfully created" \
- % (name, hdp, size)
+ LOG.debug('Created %(size)s GB LU: %(name)s FS: %(fs)s.',
+ {'size': size, 'name': lu_name, 'fs': fs_label})
- LOG.debug('create_lu: %s.', out)
- return out
+ def delete_lu(self, fs_label, lu_name):
+ """Deletes a Logical Unit.
- def delete_lu(self, cmd, ip0, user, pw, hdp, lun):
- """Delete an logical unit. Used for iSCSI only
-
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :param hdp: data Pool of the logical unit
- :param lun: id of the logical unit being deleted
- :returns: formated string 'Logical unit deleted successfully.'
+ :param fs_label: data pool of the Logical Unit
+    :param lu_name: name of the Logical Unit being deleted
"""
+ evs_id = self.get_evs(fs_label)
+ self._run_cmd("console-context", "--evs", evs_id, 'iscsi-lu', 'del',
+ '-d', '-f', lu_name)
- _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", _evsid,
- 'iscsi-lu', 'del', '-d',
- '-f', lun,
- check_exit_code=True)
+ LOG.debug('LU %(lu)s deleted.', {'lu': lu_name})
- LOG.debug('delete_lu: %(out)s -- %(err)s.', {'out': out, 'err': err})
- return out
-
- def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name):
- """Clones a volume
-
- Clone primitive used to support all iSCSI snapshot/cloning functions.
- Used for iSCSI only.
-
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :param hdp: data Pool of the logical unit
- :param src_lun: id of the logical unit being deleted
- :param size: size of the LU being cloned. Only for logging purposes
- :returns: formated string
- """
-
- _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", _evsid,
- 'iscsi-lu', 'clone', '-e',
- src_lun, name,
- '/.cinder/' + name + '.iscsi',
- check_exit_code=True)
-
- out = "LUN %s HDP: %s size: %s MB, is successfully created" \
- % (name, hdp, size)
-
- LOG.debug('create_dup: %(out)s -- %(err)s.', {'out': out, 'err': err})
- return out
-
- def file_clone(self, cmd, ip0, user, pw, fslabel, src, name):
- """Clones NFS files to a new one named 'name'
+ def file_clone(self, fs_label, src, name):
+ """Clones NFS files to a new one named 'name'.
Clone primitive used to support all NFS snapshot/cloning functions.
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :param fslabel: file system label of the new file
+ :param fs_label: file system label of the new file
:param src: source file
:param name: target path of the new created file
- :returns: formated string
"""
+ fs_list = self._get_fs_list()
+ fs = fs_list.get(fs_label)
+ if not fs:
+            LOG.error(_LE("Can't find filesystem %(label)s"),
+ {'file': src, 'label': fs_label})
+ msg = _('FS label: %s') % fs_label
+ raise exception.InvalidParameterValue(err=msg)
- _fsid = self._get_fsid(cmd, ip0, user, pw, fslabel)
- _evsid = self.get_evs(cmd, ip0, user, pw, _fsid)
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", _evsid,
- 'file-clone-create', '-f', fslabel,
- src, name,
- check_exit_code=True)
+ self._run_cmd("console-context", "--evs", fs['evsid'],
+ 'file-clone-create', '-f', fs_label, src, name)
- out = "LUN %s HDP: %s Clone: %s -> %s" % (name, _fsid, src, name)
+ def extend_lu(self, fs_label, new_size, lu_name):
+ """Extends an iSCSI volume.
- LOG.debug('file_clone: %(out)s -- %(err)s.', {'out': out, 'err': err})
- return out
-
- def extend_vol(self, cmd, ip0, user, pw, hdp, lun, new_size, name):
- """Extend a iSCSI volume.
-
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :param hdp: data Pool of the logical unit
- :param lun: id of the logical unit being extended
- :param new_size: new size of the LU
- :param name: formated string
+ :param fs_label: data pool of the Logical Unit
+ :param new_size: new size of the Logical Unit
+ :param lu_name: name of the Logical Unit
"""
+ evs_id = self.get_evs(fs_label)
+ size = six.text_type(new_size)
+ self._run_cmd("console-context", "--evs", evs_id, 'iscsi-lu', 'expand',
+ lu_name, size + 'G')
- _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", _evsid,
- 'iscsi-lu', 'expand',
- name, new_size + 'M',
- check_exit_code=True)
-
- out = ("LUN: %s successfully extended to %s MB" % (name, new_size))
-
- LOG.debug('extend_vol: %s.', out)
- return out
+ LOG.debug('LU %(lu)s extended.', {'lu': lu_name})
@utils.retry(putils.ProcessExecutionError, retries=HNAS_SSC_RETRIES,
wait_random=True)
- def add_iscsi_conn(self, cmd, ip0, user, pw, lun_name, hdp,
- port, tgtalias, initiator):
- """Setup the lun on on the specified target port
+ def add_iscsi_conn(self, lu_name, fs_label, port, tgt_alias, initiator):
+ """Sets up the Logical Unit on the specified target port.
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :param lun_name: id of the logical unit being extended
- :param hdp: data pool of the logical unit
+    :param lu_name: name of the Logical Unit to be mapped
+ :param fs_label: data pool of the Logical Unit
:param port: iSCSI port
- :param tgtalias: iSCSI qualified name
+ :param tgt_alias: iSCSI qualified name
:param initiator: initiator address
+ :returns: dictionary (conn_info) with the connection information
+ conn_info={
+            'lu_id': Logical Unit ID,
+ 'iqn': iSCSI qualified name,
+ 'lu_name': Logical Unit name,
+ 'initiator': iSCSI initiator,
+            'fs': File system to connect,
+ 'port': Port to make the iSCSI connection
+ }
"""
+ conn_info = {}
+ lu_info = self.check_lu(lu_name, fs_label)
+ _evs_id = self.get_evs(fs_label)
- LOG.debug('Adding %(lun)s to %(tgt)s returns %(tgt)s.',
- {'lun': lun_name, 'tgt': tgtalias})
- found, lunid, tgt = self.check_lu(cmd, ip0, user, pw, lun_name, hdp)
- evsid = self.get_evs(cmd, ip0, user, pw, hdp)
+ if not lu_info['mapped']:
+ tgt = self._get_targets(_evs_id, tgt_alias)
+ lu_id = self._get_unused_luid(tgt)
+ conn_info['lu_id'] = lu_id
+ conn_info['iqn'] = tgt['iqn']
- if found:
- conn = (int(lunid), lun_name, initiator, int(lunid), tgt['iqn'],
- int(lunid), hdp, port)
- out = ("H-LUN: %d mapped LUN: %s, iSCSI Initiator: %s "
- "@ index: %d, and Target: %s @ index %d is "
- "successfully paired @ CTL: %s, Port: %s.") % conn
+ # In busy situations where 2 or more instances of the driver are
+ # trying to map an LU, 2 hosts can retrieve the same 'lu_id',
+ # and try to map the LU in the same LUN. To handle that we
+ # capture the ProcessExecutionError exception, backoff for some
+ # seconds and retry it.
+ self._run_cmd("console-context", "--evs", _evs_id, 'iscsi-target',
+ 'addlu', tgt_alias, lu_name, six.text_type(lu_id))
else:
- tgt = self._get_targets(cmd, ip0, user, pw, evsid, tgtalias)
- lunid = self._get_unused_lunid(cmd, ip0, user, pw, tgt[0])
+ conn_info['lu_id'] = lu_info['id']
+ conn_info['iqn'] = lu_info['tgt']['iqn']
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", evsid,
- 'iscsi-target', 'addlu',
- tgtalias, lun_name, six.text_type(lunid),
- check_exit_code=True)
+ conn_info['lu_name'] = lu_name
+ conn_info['initiator'] = initiator
+ conn_info['fs'] = fs_label
+ conn_info['port'] = port
- conn = (int(lunid), lun_name, initiator, int(lunid), tgt[0]['iqn'],
- int(lunid), hdp, port)
- out = ("H-LUN: %d mapped LUN: %s, iSCSI Initiator: %s "
- "@ index: %d, and Target: %s @ index %d is "
- "successfully paired @ CTL: %s, Port: %s.") % conn
+ LOG.debug('add_iscsi_conn: LU %(lu)s added to %(tgt)s.',
+ {'lu': lu_name, 'tgt': tgt_alias})
- LOG.debug('add_iscsi_conn: returns %s.', out)
- return out
+ return conn_info
- def del_iscsi_conn(self, cmd, ip0, user, pw, evsid, iqn, hlun):
- """Remove the lun on on the specified target port
+ def del_iscsi_conn(self, evs_id, iqn, lu_id):
+ """Removes the Logical Unit on the specified target port.
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :param evsid: EVSID for the file system
+ :param evs_id: EVSID for the file system
:param iqn: iSCSI qualified name
- :param hlun: logical unit id
- :returns: formated string
+ :param lu_id: Logical Unit id
"""
+ found = False
+ out, err = self._run_cmd("console-context", "--evs", evs_id,
+ 'iscsi-target', 'list', iqn)
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", evsid,
- 'iscsi-target', 'list', iqn,
- check_exit_code=True)
-
+ # see if LU is already detached
lines = out.split('\n')
- out = ("H-LUN: %d already deleted from target %s" % (int(hlun), iqn))
- # see if lun is already detached
for line in lines:
if line.startswith(' '):
- lunline = line.split()[0]
- if lunline[0].isdigit() and lunline == hlun:
- out = ""
+ lu_line = line.split()[0]
+ if lu_line[0].isdigit() and lu_line == lu_id:
+ found = True
break
- if out != "":
- # hlun wasn't found
- LOG.info(_LI('del_iscsi_conn: hlun not found %s.'), out)
- return out
+ # LU wasn't found
+ if not found:
+        LOG.debug("del_iscsi_conn: LU %(lu)s already deleted from "
+ "target %(iqn)s", {'lu': lu_id, 'iqn': iqn})
+ return
# remove the LU from the target
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", evsid,
- 'iscsi-target', 'dellu',
- '-f', iqn, hlun,
- check_exit_code=True)
+ self._run_cmd("console-context", "--evs", evs_id, 'iscsi-target',
+ 'dellu', '-f', iqn, lu_id)
- out = "H-LUN: %d successfully deleted from target %s" \
- % (int(hlun), iqn)
+ LOG.debug("del_iscsi_conn: LU: %(lu)s successfully deleted from "
+ "target %(iqn)s", {'lu': lu_id, 'iqn': iqn})
- LOG.debug('del_iscsi_conn: %s.', out)
- return out
-
- def get_targetiqn(self, cmd, ip0, user, pw, targetalias, hdp, secret):
- """Obtain the targets full iqn
+ def get_target_iqn(self, tgt_alias, fs_label):
+    """Obtains the target's full IQN.
Returns the target's full iqn rather than its alias.
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :param targetalias: alias of the target
- :param hdp: data pool of the logical unit
- :param secret: CHAP secret of the target
+
+ :param tgt_alias: alias of the target
+ :param fs_label: data pool of the Logical Unit
:returns: string with full IQN
"""
-
- _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", _evsid,
- 'iscsi-target', 'list', targetalias,
- check_exit_code=True)
-
- if "does not exist" in out:
- if secret == "":
- secret = '""'
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", _evsid,
- 'iscsi-target', 'add',
- targetalias, secret,
- check_exit_code=True)
- else:
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", _evsid,
- 'iscsi-target', 'add',
- targetalias, secret,
- check_exit_code=True)
- if "success" in out:
- return targetalias
+ _evs_id = self.get_evs(fs_label)
+ out, err = self._run_cmd("console-context", "--evs", _evs_id,
+ 'iscsi-target', 'list', tgt_alias)
lines = out.split('\n')
# returns the first iqn
for line in lines:
- if 'Alias' in line:
- fulliqn = line.split()[2]
- return fulliqn
+ if 'Globally unique name' in line:
+ full_iqn = line.split()[3]
+ return full_iqn
- def set_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp, secret):
+ def set_target_secret(self, targetalias, fs_label, secret):
"""Sets the chap secret for the specified target.
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
:param targetalias: alias of the target
- :param hdp: data pool of the logical unit
+ :param fs_label: data pool of the Logical Unit
:param secret: CHAP secret of the target
"""
+ _evs_id = self.get_evs(fs_label)
+ self._run_cmd("console-context", "--evs", _evs_id, 'iscsi-target',
+ 'mod', '-s', secret, '-a', 'enable', targetalias)
- _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", _evsid,
- 'iscsi-target', 'list',
- targetalias,
- check_exit_code=False)
+ LOG.debug("set_target_secret: Secret set on target %(tgt)s.",
+ {'tgt': targetalias})
- if "does not exist" in out:
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", _evsid,
- 'iscsi-target', 'add',
- targetalias, secret,
- check_exit_code=True)
- else:
- LOG.info(_LI('targetlist: %s'), targetalias)
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", _evsid,
- 'iscsi-target', 'mod',
- '-s', secret, '-a', 'enable',
- targetalias,
- check_exit_code=True)
+ def get_target_secret(self, targetalias, fs_label):
+ """Gets the chap secret for the specified target.
- def get_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp):
- """Returns the chap secret for the specified target.
-
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
:param targetalias: alias of the target
- :param hdp: data pool of the logical unit
- :return secret: CHAP secret of the target
+ :param fs_label: data pool of the Logical Unit
+ :returns: CHAP secret of the target
"""
-
- _evsid = self.get_evs(cmd, ip0, user, pw, hdp)
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context",
- "--evs", _evsid,
- 'iscsi-target', 'list', targetalias,
- check_exit_code=True)
+ _evs_id = self.get_evs(fs_label)
+ out, err = self._run_cmd("console-context", "--evs", _evs_id,
+ 'iscsi-target', 'list', targetalias)
enabled = ""
secret = ""
@@ -771,106 +529,273 @@ class HnasBackend(object):
else:
return ""
- def check_target(self, cmd, ip0, user, pw, hdp, target_alias):
- """Checks if a given target exists and gets its info
+ def check_target(self, fs_label, target_alias):
+ """Checks if a given target exists and gets its info.
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :param hdp: pool name used
+ :param fs_label: pool name used
:param target_alias: alias of the target
- :returns: True if target exists
- :returns: list with the target info
+ :returns: dictionary (tgt_info)
+ tgt_info={
+ 'alias': The alias of the target,
+ 'found': boolean to inform if the target was found or not,
+ 'tgt': dictionary with the target information
+ }
"""
+ tgt_info = {}
+ _evs_id = self.get_evs(fs_label)
+ _tgt_list = self._get_targets(_evs_id)
- LOG.debug("Checking if target %(tgt)s exists.", {'tgt': target_alias})
- evsid = self.get_evs(cmd, ip0, user, pw, hdp)
- tgt_list = self._get_targets(cmd, ip0, user, pw, evsid)
-
- for tgt in tgt_list:
+ for tgt in _tgt_list:
if tgt['alias'] == target_alias:
- attached_luns = len(tgt['luns'])
- LOG.debug("Target %(tgt)s has %(lun)s volumes.",
- {'tgt': target_alias, 'lun': attached_luns})
- return True, tgt
+ attached_lus = len(tgt['lus'])
+ tgt_info['found'] = True
+ tgt_info['tgt'] = tgt
+ LOG.debug("Target %(tgt)s has %(lu)s volumes.",
+ {'tgt': target_alias, 'lu': attached_lus})
+ return tgt_info
- LOG.debug("Target %(tgt)s does not exist.", {'tgt': target_alias})
- return False, None
+ tgt_info['found'] = False
+ tgt_info['tgt'] = None
- def check_lu(self, cmd, ip0, user, pw, volume_name, hdp):
- """Checks if a given LUN is already mapped
+ LOG.debug("check_target: Target %(tgt)s does not exist.",
+ {'tgt': target_alias})
- :param cmd: ssc command name
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :param volume_name: number of the LUN
- :param hdp: storage pool of the LUN
- :returns: True if the lun is attached
- :returns: the LUN id
- :returns: Info related to the target
+ return tgt_info
+
+ def check_lu(self, vol_name, fs_label):
+ """Checks if a given LU is already mapped
+
+ :param vol_name: name of the LU
+ :param fs_label: storage pool of the LU
+ :returns: dictionary (lu_info) with LU information
+ lu_info={
+ 'mapped': LU state (mapped or not),
+ 'id': ID of the LU,
+            'tgt': dictionary with the target information
+ }
"""
-
- LOG.debug("Checking if vol %s (hdp: %s) is attached.",
- volume_name, hdp)
- evsid = self.get_evs(cmd, ip0, user, pw, hdp)
- tgt_list = self._get_targets(cmd, ip0, user, pw, evsid)
+ lu_info = {}
+ evs_id = self.get_evs(fs_label)
+ tgt_list = self._get_targets(evs_id, refresh=True)
for tgt in tgt_list:
- if len(tgt['luns']) == 0:
+ if len(tgt['lus']) == 0:
continue
- for lun in tgt['luns']:
- lunid = lun['id']
- lunname = lun['name']
- if lunname[:29] == volume_name[:29]:
- LOG.debug("LUN %(lun)s attached on %(lunid)s, "
+ for lu in tgt['lus']:
+ lu_id = lu['id']
+ lu_name = lu['name']
+ if lu_name[:29] == vol_name[:29]:
+ lu_info['mapped'] = True
+ lu_info['id'] = lu_id
+ lu_info['tgt'] = tgt
+ LOG.debug("LU %(lu)s attached on %(luid)s, "
"target: %(tgt)s.",
- {'lun': volume_name, 'lunid': lunid, 'tgt': tgt})
- return True, lunid, tgt
+ {'lu': vol_name, 'luid': lu_id, 'tgt': tgt})
+ return lu_info
- LOG.debug("LUN %(lun)s not attached.", {'lun': volume_name})
- return False, 0, None
+ lu_info['mapped'] = False
+ lu_info['id'] = 0
+ lu_info['tgt'] = None
- def get_existing_lu_info(self, cmd, ip0, user, pw, fslabel, lun):
- """Returns the information for the specified Logical Unit.
+ LOG.debug("LU %(lu)s not attached.", {'lu': vol_name})
+
+ return lu_info
+
+ def get_existing_lu_info(self, lu_name, fs_label=None, evs_id=None):
+ """Gets the information for the specified Logical Unit.
Returns the information of an existing Logical Unit on HNAS, according
to the name provided.
- :param cmd: the command that will be run on SMU
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :param fslabel: label of the file system
- :param lun: label of the logical unit
+ :param lu_name: label of the Logical Unit
+ :param fs_label: label of the file system
+ :param evs_id: ID of the EVS where the LU is located
+ :returns: dictionary (lu_info) with LU information
+ lu_info={
+ 'name': A Logical Unit name,
+ 'comment': A comment about the LU, not used for Cinder,
+ 'path': Path to LU inside filesystem,
+ 'size': Logical Unit size returned always in GB (volume size),
+ 'filesystem': File system where the Logical Unit was created,
+ 'fs_mounted': Information about the state of file system
+ (mounted or not),
+ 'lu_mounted': Information about the state of Logical Unit
+ (mounted or not)
+ }
"""
+ lu_info = {}
+ if evs_id is None:
+ evs_id = self.get_evs(fs_label)
- evs = self.get_evs(cmd, ip0, user, pw, fslabel)
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs",
- evs, 'iscsi-lu', 'list', lun)
+ lu_name = "'{}'".format(lu_name)
+ out, err = self._run_cmd("console-context", "--evs", evs_id,
+ 'iscsi-lu', 'list', lu_name)
- return out
+ if 'does not exist.' not in out:
+ aux = out.split('\n')
+ lu_info['name'] = aux[0].split(':')[1].strip()
+ lu_info['comment'] = aux[1].split(':')[1].strip()
+ lu_info['path'] = aux[2].split(':')[1].strip()
+ lu_info['size'] = aux[3].split(':')[1].strip()
+ lu_info['filesystem'] = aux[4].split(':')[1].strip()
+ lu_info['fs_mounted'] = aux[5].split(':')[1].strip()
+ lu_info['lu_mounted'] = aux[6].split(':')[1].strip()
- def rename_existing_lu(self, cmd, ip0, user, pw, fslabel,
- new_name, vol_name):
+ if 'TB' in lu_info['size']:
+ sz_convert = float(lu_info['size'].split()[0]) * units.Ki
+ lu_info['size'] = sz_convert
+ else:
+ lu_info['size'] = float(lu_info['size'].split()[0])
+
+ LOG.debug('get_existing_lu_info: LU info: %(lu)s', {'lu': lu_info})
+
+ return lu_info
+
+ def rename_existing_lu(self, fs_label, vol_name, new_name):
"""Renames the specified Logical Unit.
Renames an existing Logical Unit on HNAS according to the new name
provided.
- :param cmd: command that will be run on SMU
- :param ip0: string IP address of controller
- :param user: string user authentication for array
- :param pw: string password authentication for array
- :param fslabel: label of the file system
- :param new_name: new name to the existing volume
+ :param fs_label: label of the file system
:param vol_name: current name of the existing volume
+ :param new_name: new name to the existing volume
"""
- evs = self.get_evs(cmd, ip0, user, pw, fslabel)
- out, err = self.run_cmd(cmd, ip0, user, pw, "console-context", "--evs",
- evs, "iscsi-lu", "mod", "-n", new_name,
- vol_name)
- return out
+ new_name = "'{}'".format(new_name)
+ evs_id = self.get_evs(fs_label)
+ self._run_cmd("console-context", "--evs", evs_id, "iscsi-lu", "mod",
+ "-n", new_name, vol_name)
+
+ LOG.debug('rename_existing_lu_info:'
+ 'LU %(old)s was renamed to %(new)s',
+ {'old': vol_name, 'new': new_name})
+
+ def _get_fs_list(self):
+ """Gets a list of file systems configured on the backend.
+
+ :returns: a dictionary with the filesystems configured on HNAS, keyed by label
+ """
+ if not self.fslist:
+ fslist_out, err = self._run_cmd('evsfs', 'list')
+ list_raw = fslist_out.split('\n')[3:-2]
+
+ for fs_raw in list_raw:
+ fs = {}
+
+ fs_raw = fs_raw.split()
+ fs['id'] = fs_raw[0]
+ fs['label'] = fs_raw[1]
+ fs['permid'] = fs_raw[2]
+ fs['evsid'] = fs_raw[3]
+ fs['evslabel'] = fs_raw[4]
+ self.fslist[fs['label']] = fs
+
+ return self.fslist
+
+ def _get_evs_list(self):
+ """Gets a list of EVS configured on the backend.
+
+ :returns: a dictionary of the EVS configured on HNAS, keyed by EVS label
+ """
+ evslist_out, err = self._run_cmd('evs', 'list')
+
+ evslist = {}
+ idx = 0
+ for evs_raw in evslist_out.split('\n'):
+ idx += 1
+ if 'Service' in evs_raw and 'Online' in evs_raw:
+ evs = {}
+ evs_line = evs_raw.split()
+ evs['node'] = evs_line[0]
+ evs['id'] = evs_line[1]
+ evs['label'] = evs_line[3]
+ evs['ips'] = []
+ evs['ips'].append(evs_line[6])
+ # Each EVS can have a list of IPs that are displayed in the
+ # next lines of the evslist_out. We need to check if the next
+ # line is a new EVS entry or an IP of this current EVS.
+ for evs_ip_raw in evslist_out.split('\n')[idx:]:
+ if 'Service' in evs_ip_raw or not evs_ip_raw.split():
+ break
+ ip = evs_ip_raw.split()[0]
+ evs['ips'].append(ip)
+
+ evslist[evs['label']] = evs
+
+ return evslist
+
+ def get_export_list(self):
+ """Gets information on each NFS export.
+
+ :returns: a list of the exports configured on HNAS
+ """
+ nfs_export_out, _ = self._run_cmd('for-each-evs', '-q', 'nfs-export',
+ 'list')
+ fs_list = self._get_fs_list()
+ evs_list = self._get_evs_list()
+
+ export_list = []
+
+ for export_raw_data in nfs_export_out.split("Export name:")[1:]:
+ export_info = {}
+ export_data = export_raw_data.split('\n')
+
+ export_info['name'] = export_data[0].strip()
+ export_info['path'] = export_data[1].split(':')[1].strip()
+ export_info['fs'] = export_data[2].split(':')[1].strip()
+
+ if "*** not available ***" in export_raw_data:
+ export_info['size'] = -1
+ export_info['free'] = -1
+ else:
+ evslbl = fs_list[export_info['fs']]['evslabel']
+ export_info['evs'] = evs_list[evslbl]['ips']
+
+ size = export_data[3].split(':')[1].strip().split()[0]
+ multiplier = export_data[3].split(':')[1].strip().split()[1]
+ if multiplier == 'TB':
+ export_info['size'] = float(size) * units.Ki
+ else:
+ export_info['size'] = float(size)
+
+ free = export_data[4].split(':')[1].strip().split()[0]
+ fmultiplier = export_data[4].split(':')[1].strip().split()[1]
+ if fmultiplier == 'TB':
+ export_info['free'] = float(free) * units.Ki
+ else:
+ export_info['free'] = float(free)
+
+ export_list.append(export_info)
+
+ return export_list
+
+ def create_cloned_lu(self, src_lu, fs_label, clone_name):
+ """Clones a Logical Unit
+
+ Clone primitive used to support all iSCSI snapshot/cloning functions.
+
+ :param src_lu: id of the Logical Unit being cloned
+ :param fs_label: data pool of the Logical Unit
+ :param clone_name: name of the snapshot
+ """
+ evs_id = self.get_evs(fs_label)
+ self._run_cmd("console-context", "--evs", evs_id, 'iscsi-lu', 'clone',
+ '-e', src_lu, clone_name,
+ '/.cinder/' + clone_name + '.iscsi')
+
+ LOG.debug('LU %(lu)s cloned.', {'lu': clone_name})
+
+ def create_target(self, tgt_alias, fs_label, secret):
+ """Creates a new iSCSI target
+
+ :param tgt_alias: the alias with which the target will be created
+ :param fs_label: the label of the file system to create the target
+ :param secret: the secret for authentication of the target
+ """
+ _evs_id = self.get_evs(fs_label)
+ self._run_cmd("console-context", "--evs", _evs_id,
+ 'iscsi-target', 'add', tgt_alias, secret)
+
+ self._get_targets(_evs_id, refresh=True)
diff --git a/cinder/volume/drivers/hitachi/hnas_iscsi.py b/cinder/volume/drivers/hitachi/hnas_iscsi.py
index 36e41fe43ab..066bcd03913 100644
--- a/cinder/volume/drivers/hitachi/hnas_iscsi.py
+++ b/cinder/volume/drivers/hitachi/hnas_iscsi.py
@@ -17,34 +17,31 @@
"""
iSCSI Cinder Volume driver for Hitachi Unified Storage (HUS-HNAS) platform.
"""
-import os
-import re
-import six
-from xml.etree import ElementTree as ETree
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
-from oslo_utils import units
-
+import six
from cinder import exception
-from cinder.i18n import _, _LE, _LI, _LW
+from cinder.i18n import _, _LE, _LI
from cinder import interface
+
from cinder import utils as cinder_utils
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hnas_backend
+from cinder.volume.drivers.hitachi import hnas_utils
from cinder.volume import utils
-from cinder.volume import volume_types
-HDS_HNAS_ISCSI_VERSION = '4.3.0'
+
+HNAS_ISCSI_VERSION = '5.0.0'
LOG = logging.getLogger(__name__)
iSCSI_OPTS = [
cfg.StrOpt('hds_hnas_iscsi_config_file',
default='/opt/hds/hnas/cinder_iscsi_conf.xml',
- help='Configuration file for HDS iSCSI cinder plugin')]
+ help='Configuration file for HNAS iSCSI cinder plugin')]
CONF = cfg.CONF
CONF.register_opts(iSCSI_OPTS)
@@ -53,277 +50,123 @@ HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc',
'chap_enabled': 'True',
'ssh_port': '22'}
MAX_HNAS_ISCSI_TARGETS = 32
-
-
-def factory_bend(drv_configs):
- return hnas_backend.HnasBackend(drv_configs)
-
-
-def _loc_info(loc):
- """Parse info from location string."""
-
- LOG.info(_LI("Parse_loc: %s"), loc)
- info = {}
- tup = loc.split(',')
- if len(tup) < 5:
- info['id_lu'] = tup[0].split('.')
- return info
- info['id_lu'] = tup[2].split('.')
- info['tgt'] = tup
- return info
-
-
-def _xml_read(root, element, check=None):
- """Read an xml element."""
-
- val = root.findtext(element)
-
- # mandatory parameter not found
- if val is None and check:
- raise exception.ParameterNotFound(param=element)
-
- # tag not found
- if val is None:
- return None
-
- svc_tag_pattern = re.compile("svc_[0-3]$")
- # tag found but empty parameter.
- if not val.strip():
- # Service tags are empty
- if svc_tag_pattern.search(element):
- return ""
- else:
- raise exception.ParameterNotFound(param=element)
-
- LOG.debug(_LI("%(element)s: %(val)s"),
- {'element': element,
- 'val': val if element != 'password' else '***'})
-
- return val.strip()
-
-
-def _read_config(xml_config_file):
- """Read hds driver specific xml config file."""
-
- if not os.access(xml_config_file, os.R_OK):
- msg = (_("Can't open config file: %s") % xml_config_file)
- raise exception.NotFound(message=msg)
-
- try:
- root = ETree.parse(xml_config_file).getroot()
- except Exception:
- msg = (_("Error parsing config file: %s") % xml_config_file)
- raise exception.ConfigNotFound(message=msg)
-
- # mandatory parameters
- config = {}
- arg_prereqs = ['mgmt_ip0', 'username']
- for req in arg_prereqs:
- config[req] = _xml_read(root, req, True)
-
- # optional parameters
- opt_parameters = ['hnas_cmd', 'ssh_enabled', 'chap_enabled',
- 'cluster_admin_ip0']
- for req in opt_parameters:
- config[req] = _xml_read(root, req)
-
- if config['chap_enabled'] is None:
- config['chap_enabled'] = HNAS_DEFAULT_CONFIG['chap_enabled']
-
- if config['ssh_enabled'] == 'True':
- config['ssh_private_key'] = _xml_read(root, 'ssh_private_key', True)
- config['ssh_port'] = _xml_read(root, 'ssh_port')
- config['password'] = _xml_read(root, 'password')
- if config['ssh_port'] is None:
- config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port']
- else:
- # password is mandatory when not using SSH
- config['password'] = _xml_read(root, 'password', True)
-
- if config['hnas_cmd'] is None:
- config['hnas_cmd'] = HNAS_DEFAULT_CONFIG['hnas_cmd']
-
- config['hdp'] = {}
- config['services'] = {}
-
- # min one needed
- for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:
- if _xml_read(root, svc) is None:
- continue
- service = {'label': svc}
-
- # none optional
- for arg in ['volume_type', 'hdp', 'iscsi_ip']:
- service[arg] = _xml_read(root, svc + '/' + arg, True)
- config['services'][service['volume_type']] = service
- config['hdp'][service['hdp']] = service['hdp']
-
- # at least one service required!
- if config['services'].keys() is None:
- raise exception.ParameterNotFound(param="No service found")
-
- return config
+MAX_HNAS_LUS_PER_TARGET = 32
@interface.volumedriver
-class HDSISCSIDriver(driver.ISCSIDriver):
- """HDS HNAS volume driver.
+class HNASISCSIDriver(driver.ISCSIDriver):
+ """HNAS iSCSI volume driver.
Version history:
- .. code-block:: none
-
- 1.0.0: Initial driver version
- 2.2.0: Added support to SSH authentication
- 3.2.0: Added pool aware scheduling
- Fixed concurrency errors
- 3.3.0: Fixed iSCSI target limitation error
- 4.0.0: Added manage/unmanage features
- 4.1.0: Fixed XML parser checks on blank options
- 4.2.0: Fixed SSH and cluster_admin_ip0 verification
- 4.3.0: Fixed attachment with os-brick 1.0.0
+ .. code-block:: none
+ Version 1.0.0: Initial driver version
+ Version 2.2.0: Added support to SSH authentication
+ Version 3.2.0: Added pool aware scheduling
+ Fixed concurrency errors
+ Version 3.3.0: Fixed iSCSI target limitation error
+ Version 4.0.0: Added manage/unmanage features
+ Version 4.1.0: Fixed XML parser checks on blank options
+ Version 4.2.0: Fixed SSH and cluster_admin_ip0 verification
+ Version 4.3.0: Fixed attachment with os-brick 1.0.0
+ Version 5.0.0: Code cleaning up
+ New communication interface between the driver and HNAS
+ Removed the option to use local SSC (ssh_enabled=False)
+ Updated to use versioned objects
+ Changed the class name to HNASISCSIDriver
"""
def __init__(self, *args, **kwargs):
- """Initialize, read different config parameters."""
+ """Initializes and reads different config parameters."""
+ self.configuration = kwargs.get('configuration', None)
- super(HDSISCSIDriver, self).__init__(*args, **kwargs)
- self.driver_stats = {}
self.context = {}
- self.configuration.append_config_values(iSCSI_OPTS)
- self.config = _read_config(
- self.configuration.hds_hnas_iscsi_config_file)
- self.type = 'HNAS'
+ service_parameters = ['volume_type', 'hdp', 'iscsi_ip']
+ optional_parameters = ['hnas_cmd', 'cluster_admin_ip0',
+ 'chap_enabled']
- self.platform = self.type.lower()
- LOG.info(_LI("Backend type: %s"), self.type)
- self.bend = factory_bend(self.config)
+ if self.configuration:
+ self.configuration.append_config_values(iSCSI_OPTS)
+ self.config = hnas_utils.read_config(
+ self.configuration.hds_hnas_iscsi_config_file,
+ service_parameters,
+ optional_parameters)
- def _array_info_get(self):
- """Get array parameters."""
-
- out = self.bend.get_version(self.config['hnas_cmd'],
- HDS_HNAS_ISCSI_VERSION,
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'])
- inf = out.split()
-
- return inf[1], 'hnas_' + inf[1], inf[6]
-
- def _get_iscsi_info(self):
- """Validate array iscsi parameters."""
-
- out = self.bend.get_iscsi_info(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'])
- lines = out.split('\n')
-
- # dict based on iSCSI portal ip addresses
- conf = {}
- for line in lines:
- # only record up links
- if 'CTL' in line and 'Up' in line:
- inf = line.split()
- (ctl, port, ip, ipp) = (inf[1], inf[3], inf[5], inf[7])
- conf[ip] = {}
- conf[ip]['ctl'] = ctl
- conf[ip]['port'] = port
- conf[ip]['iscsi_port'] = ipp
- LOG.debug("portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(pt)s",
- {'ip': ip, 'ipp': ipp, 'ctl': ctl, 'pt': port})
-
- return conf
+ super(HNASISCSIDriver, self).__init__(*args, **kwargs)
+ self.backend = hnas_backend.HNASSSHBackend(self.config)
def _get_service(self, volume):
- """Get the available service parameters
+ """Gets the available service parameters.
- Get the available service parametersfor a given volume using its
- type.
- :param volume: dictionary volume reference
- :returns: HDP related to the service
+ Get the available service parameters for a given volume using its
+ type.
+
+ :param volume: dictionary volume reference
+ :returns: HDP (file system) related to the service or error if no
+ configuration is found.
+ :raises: ParameterNotFound
"""
-
- label = utils.extract_host(volume['host'], level='pool')
- LOG.info(_LI("Using service label: %s"), label)
+ label = utils.extract_host(volume.host, level='pool')
+ LOG.info(_LI("Using service label: %(lbl)s."), {'lbl': label})
if label in self.config['services'].keys():
svc = self.config['services'][label]
return svc['hdp']
else:
- LOG.info(_LI("Available services: %s."),
- self.config['services'].keys())
- LOG.error(_LE("No configuration found for service: %s."), label)
+ LOG.info(_LI("Available services: %(svc)s."),
+ {'svc': self.config['services'].keys()})
+ LOG.error(_LE("No configuration found for service: %(lbl)s."),
+ {'lbl': label})
raise exception.ParameterNotFound(param=label)
def _get_service_target(self, volume):
- """Get the available service parameters
+ """Gets the available service parameters
- Get the available service parameters for a given volume using
- its type.
- :param volume: dictionary volume reference
+ Gets the available service parameters for a given volume using its
+ type.
+ :param volume: dictionary volume reference
+ :returns: service target information or raises error
+ :raises: NoMoreTargets
"""
+ fs_label = self._get_service(volume)
+ evs_id = self.backend.get_evs(fs_label)
- hdp = self._get_service(volume)
- info = _loc_info(volume['provider_location'])
- (arid, lun_name) = info['id_lu']
-
- evsid = self.bend.get_evs(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- hdp)
- svc_label = utils.extract_host(volume['host'], level='pool')
+ svc_label = utils.extract_host(volume.host, level='pool')
svc = self.config['services'][svc_label]
- LOG.info(_LI("_get_service_target hdp: %s."), hdp)
- LOG.info(_LI("config[services]: %s."), self.config['services'])
+ lu_info = self.backend.check_lu(volume.name, fs_label)
- mapped, lunid, tgt = self.bend.check_lu(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- lun_name, hdp)
-
- LOG.info(_LI("Target is %(map)s! Targetlist = %(tgtl)s."),
- {'map': "mapped" if mapped else "not mapped", 'tgtl': tgt})
-
- # The volume is already mapped to a LUN, so no need to create any
+ # The volume is already mapped to a LU, so no need to create any
# targets
- if mapped:
- service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
- svc['port'], hdp, tgt['alias'], tgt['secret'])
+ if lu_info['mapped']:
+ service = (
+ svc['iscsi_ip'], svc['iscsi_port'], svc['evs'], svc['port'],
+ fs_label, lu_info['tgt']['alias'], lu_info['tgt']['secret'])
return service
# Each EVS can have up to 32 targets. Each target can have up to 32
- # LUNs attached and have the name format 'evs-tgt<0-N>'. We run
+ # LUs attached and have the name format 'evs-tgt<0-N>'. We run
# from the first 'evs1-tgt0' until we find a target that is not already
- # created in the BE or is created but have slots to place new targets.
- found_tgt = False
+ # created in the BE or is created but have slots to place new LUs.
+ tgt_alias = ''
for i in range(0, MAX_HNAS_ISCSI_TARGETS):
- tgt_alias = 'evs' + evsid + '-tgt' + six.text_type(i)
- # TODO(erlon): we need to go to the BE 32 times here
- tgt_exist, tgt = self.bend.check_target(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- hdp, tgt_alias)
- if tgt_exist and len(tgt['luns']) < 32 or not tgt_exist:
+ tgt_alias = 'evs' + evs_id + '-tgt' + six.text_type(i)
+ tgt = self.backend.check_target(fs_label, tgt_alias)
+
+ if (tgt['found'] and
+ len(tgt['tgt']['lus']) < MAX_HNAS_LUS_PER_TARGET or
+ not tgt['found']):
# Target exists and has free space or, target does not exist
# yet. Proceed and use the target or create a target using this
# name.
- found_tgt = True
break
-
- # If we've got here and found_tgt is not True, we run out of targets,
- # raise and go away.
- if not found_tgt:
+ else:
+ # If we've got here, we run out of targets, raise and go away.
LOG.error(_LE("No more targets available."))
raise exception.NoMoreTargets(param=tgt_alias)
- LOG.info(_LI("Using target label: %s."), tgt_alias)
+ LOG.info(_LI("Using target label: %(tgt)s."), {'tgt': tgt_alias})
# Check if we have a secret stored for this target so we don't have to
# go to BE on every query
@@ -340,526 +183,102 @@ class HDSISCSIDriver(driver.ISCSIDriver):
# iscsi_secret has already been set, retrieve the secret if
# available, otherwise generate and store
if self.config['chap_enabled'] == 'True':
- # It may not exist, create and set secret.
+ # CHAP support is enabled. Tries to get the target secret.
if 'iscsi_secret' not in tgt_info.keys():
- LOG.info(_LI("Retrieving secret for service: %s."),
- tgt_alias)
-
- out = self.bend.get_targetsecret(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- tgt_alias, hdp)
+ LOG.info(_LI("Retrieving secret for service: %(tgt)s."),
+ {'tgt': tgt_alias})
+ out = self.backend.get_target_secret(tgt_alias, fs_label)
tgt_info['iscsi_secret'] = out
- if tgt_info['iscsi_secret'] == "":
- randon_secret = utils.generate_password()[0:15]
- tgt_info['iscsi_secret'] = randon_secret
- self.bend.set_targetsecret(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- tgt_alias, hdp,
- tgt_info['iscsi_secret'])
- LOG.info(_LI("Set tgt CHAP secret for service: %s."),
- tgt_alias)
+ # CHAP supported and the target has no secret yet. So, the
+ # secret is created for the target
+ if tgt_info['iscsi_secret'] == "":
+ random_secret = utils.generate_password()[0:15]
+ tgt_info['iscsi_secret'] = random_secret
+
+ LOG.info(_LI("Set tgt CHAP secret for service: %(tgt)s."),
+ {'tgt': tgt_alias})
else:
# We set blank password when the client does not
# support CHAP. Later on, if the client tries to create a new
- # target that does not exists in the backend, we check for this
+ # target that does not exist in the backend, we check for this
# value and use a temporary dummy password.
if 'iscsi_secret' not in tgt_info.keys():
# Warns in the first time
LOG.info(_LI("CHAP authentication disabled."))
- tgt_info['iscsi_secret'] = ""
+ tgt_info['iscsi_secret'] = "''"
+
+ # If the target does not exist, it should be created
+ if not tgt['found']:
+ self.backend.create_target(tgt_alias, fs_label,
+ tgt_info['iscsi_secret'])
+ elif (tgt['tgt']['secret'] == "" and
+ self.config['chap_enabled'] == 'True'):
+ # The target exists, has no secret and chap is enabled
+ self.backend.set_target_secret(tgt_alias, fs_label,
+ tgt_info['iscsi_secret'])
if 'tgt_iqn' not in tgt_info:
- LOG.info(_LI("Retrieving target for service: %s."), tgt_alias)
+ LOG.info(_LI("Retrieving IQN for service: %(tgt)s."),
+ {'tgt': tgt_alias})
- out = self.bend.get_targetiqn(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- tgt_alias, hdp,
- tgt_info['iscsi_secret'])
+ out = self.backend.get_target_iqn(tgt_alias, fs_label)
tgt_info['tgt_iqn'] = out
self.config['targets'][tgt_alias] = tgt_info
- service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
- svc['port'], hdp, tgt_alias, tgt_info['iscsi_secret'])
+ service = (svc['iscsi_ip'], svc['iscsi_port'], svc['evs'], svc['port'],
+ fs_label, tgt_alias, tgt_info['iscsi_secret'])
return service
def _get_stats(self):
- """Get HDP stats from HNAS."""
+ """Get FS stats from HNAS.
+ :returns: dictionary with the stats from HNAS
+ """
hnas_stat = {}
be_name = self.configuration.safe_get('volume_backend_name')
- hnas_stat["volume_backend_name"] = be_name or 'HDSISCSIDriver'
- hnas_stat["vendor_name"] = 'HDS'
- hnas_stat["driver_version"] = HDS_HNAS_ISCSI_VERSION
+ hnas_stat["volume_backend_name"] = be_name or 'HNASISCSIDriver'
+ hnas_stat["vendor_name"] = 'Hitachi'
+ hnas_stat["driver_version"] = HNAS_ISCSI_VERSION
hnas_stat["storage_protocol"] = 'iSCSI'
hnas_stat['reserved_percentage'] = 0
for pool in self.pools:
- out = self.bend.get_hdp_info(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- pool['hdp'])
+ fs_info = self.backend.get_fs_info(pool['fs'])
- LOG.debug('Query for pool %(pool)s: %(out)s.',
- {'pool': pool['pool_name'], 'out': out})
-
- (hdp, size, _ign, used) = out.split()[1:5] # in MB
- pool['total_capacity_gb'] = int(size) / units.Ki
- pool['free_capacity_gb'] = (int(size) - int(used)) / units.Ki
- pool['allocated_capacity_gb'] = int(used) / units.Ki
+ pool['total_capacity_gb'] = (float(fs_info['total_size']))
+ pool['free_capacity_gb'] = (
+ float(fs_info['total_size']) - float(fs_info['used_size']))
+ pool['allocated_capacity_gb'] = (float(fs_info['total_size']))
pool['QoS_support'] = 'False'
pool['reserved_percentage'] = 0
hnas_stat['pools'] = self.pools
- LOG.info(_LI("stats: stats: %s."), hnas_stat)
+ LOG.info(_LI("stats: %(stat)s."), {'stat': hnas_stat})
return hnas_stat
- def _get_hdp_list(self):
- """Get HDPs from HNAS."""
+ def _check_fs_list(self):
+ """Verifies the FSs in HNAS array.
- out = self.bend.get_hdp_info(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'])
-
- hdp_list = []
- for line in out.split('\n'):
- if 'HDP' in line:
- inf = line.split()
- if int(inf[1]) >= units.Ki:
- # HDP fsids start at units.Ki (1024)
- hdp_list.append(inf[11])
- else:
- # HDP pools are 2-digits max
- hdp_list.extend(inf[1:2])
-
- # returns a list of HDP IDs
- LOG.info(_LI("HDP list: %s"), hdp_list)
- return hdp_list
-
- def _check_hdp_list(self):
- """Verify HDPs in HNAS array.
-
- Verify that all HDPs specified in the configuration files actually
+ Verify that all FSs specified in the configuration files actually
exists on the storage.
"""
-
- hdpl = self._get_hdp_list()
- lst = self.config['hdp'].keys()
-
- for hdp in lst:
- if hdp not in hdpl:
- LOG.error(_LE("HDP not found: %s"), hdp)
- err = "HDP not found: " + hdp
- raise exception.ParameterNotFound(param=err)
- # status, verify corresponding status is Normal
-
- def _id_to_vol(self, volume_id):
- """Given the volume id, retrieve the volume object from database.
-
- :param volume_id: volume id string
- """
-
- vol = self.db.volume_get(self.context, volume_id)
-
- return vol
-
- def _update_vol_location(self, volume_id, loc):
- """Update the provider location.
-
- :param volume_id: volume id string
- :param loc: string provider location value
- """
-
- update = {'provider_location': loc}
- self.db.volume_update(self.context, volume_id, update)
-
- def check_for_setup_error(self):
- """Returns an error if prerequisites aren't met."""
-
- pass
-
- def do_setup(self, context):
- """Setup and verify HDS HNAS storage connection."""
-
- self.context = context
- (self.arid, self.hnas_name, self.lumax) = self._array_info_get()
- self._check_hdp_list()
-
- service_list = self.config['services'].keys()
- for svc in service_list:
- svc = self.config['services'][svc]
- pool = {}
- pool['pool_name'] = svc['volume_type']
- pool['service_label'] = svc['volume_type']
- pool['hdp'] = svc['hdp']
-
- self.pools.append(pool)
-
- LOG.info(_LI("Configured pools: %s"), self.pools)
-
- iscsi_info = self._get_iscsi_info()
- LOG.info(_LI("do_setup: %s"), iscsi_info)
- for svc in self.config['services'].keys():
- svc_ip = self.config['services'][svc]['iscsi_ip']
- if svc_ip in iscsi_info.keys():
- LOG.info(_LI("iSCSI portal found for service: %s"), svc_ip)
- self.config['services'][svc]['port'] = \
- iscsi_info[svc_ip]['port']
- self.config['services'][svc]['ctl'] = iscsi_info[svc_ip]['ctl']
- self.config['services'][svc]['iscsi_port'] = \
- iscsi_info[svc_ip]['iscsi_port']
- else: # config iscsi address not found on device!
- LOG.error(_LE("iSCSI portal not found "
- "for service: %s"), svc_ip)
- raise exception.ParameterNotFound(param=svc_ip)
-
- def ensure_export(self, context, volume):
- pass
-
- def create_export(self, context, volume, connector):
- """Create an export. Moved to initialize_connection.
-
- :param context:
- :param volume: volume reference
- """
-
- name = volume['name']
- LOG.debug("create_export %s", name)
-
- pass
-
- def remove_export(self, context, volume):
- """Disconnect a volume from an attached instance.
-
- :param context: context
- :param volume: dictionary volume reference
- """
-
- provider = volume['provider_location']
- name = volume['name']
- LOG.debug("remove_export provider %(provider)s on %(name)s",
- {'provider': provider, 'name': name})
-
- pass
-
- def create_volume(self, volume):
- """Create a LU on HNAS.
-
- :param volume: dictionary volume reference
- """
-
- hdp = self._get_service(volume)
- out = self.bend.create_lu(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- hdp,
- '%s' % (int(volume['size']) * units.Ki),
- volume['name'])
-
- LOG.info(_LI("create_volume: create_lu returns %s"), out)
-
- lun = self.arid + '.' + out.split()[1]
- sz = int(out.split()[5])
-
- # Example: 92210013.volume-44d7e29b-2aa4-4606-8bc4-9601528149fd
- LOG.info(_LI("LUN %(lun)s of size %(sz)s MB is created."),
- {'lun': lun, 'sz': sz})
- return {'provider_location': lun}
-
- def create_cloned_volume(self, dst, src):
- """Create a clone of a volume.
-
- :param dst: ditctionary destination volume reference
- :param src: ditctionary source volume reference
- """
-
- if src['size'] > dst['size']:
- msg = 'Clone volume size must not be smaller than source volume'
- raise exception.VolumeBackendAPIException(data=msg)
-
- hdp = self._get_service(dst)
- size = int(src['size']) * units.Ki
- source_vol = self._id_to_vol(src['id'])
- (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu']
- out = self.bend.create_dup(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- slun, hdp, '%s' % size,
- dst['name'])
-
- lun = self.arid + '.' + out.split()[1]
-
- if src['size'] < dst['size']:
- size = dst['size']
- self.extend_volume(dst, size)
- else:
- size = int(out.split()[5])
-
- LOG.debug("LUN %(lun)s of size %(size)s MB is cloned.",
- {'lun': lun, 'size': size})
- return {'provider_location': lun}
-
- def extend_volume(self, volume, new_size):
- """Extend an existing volume.
-
- :param volume: dictionary volume reference
- :param new_size: int size in GB to extend
- """
-
- hdp = self._get_service(volume)
- (arid, lun) = _loc_info(volume['provider_location'])['id_lu']
- self.bend.extend_vol(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- hdp, lun,
- '%s' % (new_size * units.Ki),
- volume['name'])
-
- LOG.info(_LI("LUN %(lun)s extended to %(size)s GB."),
- {'lun': lun, 'size': new_size})
-
- def delete_volume(self, volume):
- """Delete an LU on HNAS.
-
- :param volume: dictionary volume reference
- """
-
- prov_loc = volume['provider_location']
- if prov_loc is None:
- LOG.error(_LE("delete_vol: provider location empty."))
- return
- info = _loc_info(prov_loc)
- (arid, lun) = info['id_lu']
- if 'tgt' in info.keys(): # connected?
- LOG.info(_LI("delete lun loc %s"), info['tgt'])
- # loc = id.lun
- (_portal, iqn, loc, ctl, port, hlun) = info['tgt']
- self.bend.del_iscsi_conn(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- ctl, iqn, hlun)
-
- name = self.hnas_name
-
- LOG.debug("delete lun %(lun)s on %(name)s", {'lun': lun, 'name': name})
-
- hdp = self._get_service(volume)
- self.bend.delete_lu(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- hdp, lun)
-
- @cinder_utils.synchronized('volume_mapping')
- def initialize_connection(self, volume, connector):
- """Map the created volume to connector['initiator'].
-
- :param volume: dictionary volume reference
- :param connector: dictionary connector reference
- """
-
- LOG.info(_LI("initialize volume %(vol)s connector %(conn)s"),
- {'vol': volume, 'conn': connector})
-
- # connector[ip, host, wwnns, unititator, wwp/
-
- service_info = self._get_service_target(volume)
- (ip, ipp, ctl, port, _hdp, tgtalias, secret) = service_info
- info = _loc_info(volume['provider_location'])
-
- if 'tgt' in info.keys(): # spurious repeat connection
- # print info.keys()
- LOG.debug("initiate_conn: tgt already set %s", info['tgt'])
- (arid, lun_name) = info['id_lu']
- loc = arid + '.' + lun_name
- # sps, use target if provided
- try:
- out = self.bend.add_iscsi_conn(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- lun_name, _hdp, port, tgtalias,
- connector['initiator'])
- except processutils.ProcessExecutionError:
- msg = _("Error attaching volume %s. "
- "Target limit might be reached!") % volume['id']
- raise exception.ISCSITargetAttachFailed(message=msg)
-
- hnas_portal = ip + ':' + ipp
- # sps need hlun, fulliqn
- hlun = out.split()[1]
- fulliqn = out.split()[13]
- tgt = hnas_portal + ',' + tgtalias + ',' + loc + ',' + ctl + ','
- tgt += port + ',' + hlun
-
- LOG.info(_LI("initiate: connection %s"), tgt)
-
- properties = {}
- properties['provider_location'] = tgt
- self._update_vol_location(volume['id'], tgt)
- properties['target_discovered'] = False
- properties['target_portal'] = hnas_portal
- properties['target_iqn'] = fulliqn
- properties['target_lun'] = int(hlun)
- properties['volume_id'] = volume['id']
- properties['auth_username'] = connector['initiator']
-
- if self.config['chap_enabled'] == 'True':
- properties['auth_method'] = 'CHAP'
- properties['auth_password'] = secret
-
- conn_info = {'driver_volume_type': 'iscsi', 'data': properties}
- LOG.debug("initialize_connection: conn_info: %s.", conn_info)
- return conn_info
-
- @cinder_utils.synchronized('volume_mapping')
- def terminate_connection(self, volume, connector, **kwargs):
- """Terminate a connection to a volume.
-
- :param volume: dictionary volume reference
- :param connector: dictionary connector reference
- """
-
- info = _loc_info(volume['provider_location'])
- if 'tgt' not in info.keys(): # spurious disconnection
- LOG.warning(_LW("terminate_conn: provider location empty."))
- return
- (arid, lun) = info['id_lu']
- (_portal, tgtalias, loc, ctl, port, hlun) = info['tgt']
- LOG.info(_LI("terminate: connection %s"), volume['provider_location'])
- self.bend.del_iscsi_conn(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- ctl, tgtalias, hlun)
- self._update_vol_location(volume['id'], loc)
-
- return {'provider_location': loc}
-
- def create_volume_from_snapshot(self, volume, snapshot):
- """Create a volume from a snapshot.
-
- :param volume: dictionary volume reference
- :param snapshot: dictionary snapshot reference
- """
-
- size = int(snapshot['volume_size']) * units.Ki
- (arid, slun) = _loc_info(snapshot['provider_location'])['id_lu']
- hdp = self._get_service(volume)
- out = self.bend.create_dup(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- slun, hdp, '%s' % (size),
- volume['name'])
- lun = self.arid + '.' + out.split()[1]
- sz = int(out.split()[5])
-
- LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot.",
- {'lun': lun, 'sz': sz})
- return {'provider_location': lun}
-
- def create_snapshot(self, snapshot):
- """Create a snapshot.
-
- :param snapshot: dictionary snapshot reference
- """
-
- source_vol = self._id_to_vol(snapshot['volume_id'])
- hdp = self._get_service(source_vol)
- size = int(snapshot['volume_size']) * units.Ki
- (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu']
- out = self.bend.create_dup(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- slun, hdp,
- '%s' % (size),
- snapshot['name'])
- lun = self.arid + '.' + out.split()[1]
- size = int(out.split()[5])
-
- LOG.debug("LUN %(lun)s of size %(size)s MB is created.",
- {'lun': lun, 'size': size})
- return {'provider_location': lun}
-
- def delete_snapshot(self, snapshot):
- """Delete a snapshot.
-
- :param snapshot: dictionary snapshot reference
- """
-
- loc = snapshot['provider_location']
-
- # to take care of spurious input
- if loc is None:
- # which could cause exception.
- return
-
- (arid, lun) = loc.split('.')
- source_vol = self._id_to_vol(snapshot['volume_id'])
- hdp = self._get_service(source_vol)
- myid = self.arid
-
- if arid != myid:
- LOG.error(_LE("Array mismatch %(myid)s vs %(arid)s"),
- {'myid': myid, 'arid': arid})
- msg = 'Array id mismatch in delete snapshot'
- raise exception.VolumeBackendAPIException(data=msg)
- self.bend.delete_lu(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- hdp, lun)
-
- LOG.debug("LUN %s is deleted.", lun)
- return
-
- def get_volume_stats(self, refresh=False):
- """Get volume stats. If 'refresh', run update the stats first."""
-
- if refresh:
- self.driver_stats = self._get_stats()
-
- return self.driver_stats
-
- def get_pool(self, volume):
-
- if not volume['volume_type']:
- return 'default'
- else:
- metadata = {}
- type_id = volume['volume_type_id']
- if type_id is not None:
- metadata = volume_types.get_volume_type_extra_specs(type_id)
- if not metadata.get('service_label'):
- return 'default'
- else:
- if metadata['service_label'] not in \
- self.config['services'].keys():
- return 'default'
- else:
- pass
- return metadata['service_label']
+ fs_list = self.config['fs'].keys()
+
+ for fs in fs_list:
+ if not self.backend.get_fs_info(fs):
+ msg = (
+ _("File system not found or not mounted: %(fs)s") %
+ {'fs': fs})
+ LOG.error(msg)
+ raise exception.ParameterNotFound(param=msg)
def _check_pool_and_fs(self, volume, fs_label):
- """Validation of the pool and filesystem.
+ """Validates pool and file system of a volume being managed.
Checks if the file system for the volume-type chosen matches the
one passed in the volume reference. Also, checks if the pool
@@ -867,10 +286,11 @@ class HDSISCSIDriver(driver.ISCSIDriver):
:param volume: Reference to the volume.
:param fs_label: Label of the file system.
+ :raises: ManageExistingVolumeTypeMismatch
"""
- pool_from_vol_type = self.get_pool(volume)
+ pool_from_vol_type = hnas_utils.get_pool(self.config, volume)
- pool_from_host = utils.extract_host(volume['host'], level='pool')
+ pool_from_host = utils.extract_host(volume.host, level='pool')
if self.config['services'][pool_from_vol_type]['hdp'] != fs_label:
msg = (_("Failed to manage existing volume because the pool of "
@@ -896,6 +316,8 @@ class HDSISCSIDriver(driver.ISCSIDriver):
the volume reference.
:param vol_ref: existing volume to take under management
+ :returns: the file system label and the volume name or raises error
+ :raises: ManageExistingInvalidReference
"""
vol_info = vol_ref.strip().split('/')
@@ -911,44 +333,260 @@ class HDSISCSIDriver(driver.ISCSIDriver):
raise exception.ManageExistingInvalidReference(
existing_ref=vol_ref, reason=msg)
+ def check_for_setup_error(self):
+ pass
+
+ def do_setup(self, context):
+ """Sets up and verify Hitachi HNAS storage connection."""
+ self.context = context
+ self._check_fs_list()
+
+ service_list = self.config['services'].keys()
+ for svc in service_list:
+ svc = self.config['services'][svc]
+ pool = {}
+ pool['pool_name'] = svc['volume_type']
+ pool['service_label'] = svc['volume_type']
+ pool['fs'] = svc['hdp']
+
+ self.pools.append(pool)
+
+ LOG.info(_LI("Configured pools: %(pool)s"), {'pool': self.pools})
+
+ evs_info = self.backend.get_evs_info()
+ LOG.info(_LI("Configured EVSs: %(evs)s"), {'evs': evs_info})
+
+ for svc in self.config['services'].keys():
+ svc_ip = self.config['services'][svc]['iscsi_ip']
+ if svc_ip in evs_info.keys():
+ LOG.info(_LI("iSCSI portal found for service: %s"), svc_ip)
+ self.config['services'][svc]['evs'] = (
+ evs_info[svc_ip]['evs_number'])
+ self.config['services'][svc]['iscsi_port'] = '3260'
+ self.config['services'][svc]['port'] = '0'
+ else:
+ LOG.error(_LE("iSCSI portal not found "
+ "for service: %(svc)s"), {'svc': svc_ip})
+ raise exception.InvalidParameterValue(err=svc_ip)
+
+ def ensure_export(self, context, volume):
+ pass
+
+ def create_export(self, context, volume, connector):
+ pass
+
+ def remove_export(self, context, volume):
+ pass
+
+ def create_volume(self, volume):
+ """Creates a LU on HNAS.
+
+ :param volume: dictionary volume reference
+ :returns: the volume provider location
+ """
+ fs = self._get_service(volume)
+ size = six.text_type(volume.size)
+
+ self.backend.create_lu(fs, size, volume.name)
+
+ LOG.info(_LI("LU %(lu)s of size %(sz)s GB is created."),
+ {'lu': volume.name, 'sz': volume.size})
+
+ return {'provider_location': self._get_provider_location(volume)}
+
+ def create_cloned_volume(self, dst, src):
+ """Creates a clone of a volume.
+
+ :param dst: dictionary destination volume reference
+ :param src: dictionary source volume reference
+ :returns: the provider location of the extended volume
+ """
+ fs_label = self._get_service(dst)
+
+ self.backend.create_cloned_lu(src.name, fs_label, dst.name)
+
+ if src.size < dst.size:
+ size = dst.size
+ self.extend_volume(dst, size)
+
+ LOG.debug("LU %(lu)s of size %(size)d GB is cloned.",
+ {'lu': src.name, 'size': src.size})
+
+ return {'provider_location': self._get_provider_location(dst)}
+
+ def extend_volume(self, volume, new_size):
+ """Extends an existing volume.
+
+ :param volume: dictionary volume reference
+ :param new_size: int size in GB to extend
+ """
+ fs = self._get_service(volume)
+ self.backend.extend_lu(fs, new_size, volume.name)
+
+ LOG.info(_LI("LU %(lu)s extended to %(size)s GB."),
+ {'lu': volume.name, 'size': new_size})
+
+ def delete_volume(self, volume):
+ """Deletes the volume on HNAS.
+
+ :param volume: dictionary volume reference
+ """
+ fs = self._get_service(volume)
+ self.backend.delete_lu(fs, volume.name)
+
+ LOG.debug("Delete LU %(lu)s", {'lu': volume.name})
+
+ @cinder_utils.synchronized('volume_mapping')
+ def initialize_connection(self, volume, connector):
+ """Maps the created volume to connector['initiator'].
+
+ :param volume: dictionary volume reference
+ :param connector: dictionary connector reference
+ :returns: The connection information
+ :raises: ISCSITargetAttachFailed
+ """
+ LOG.info(_LI("initialize volume %(vol)s connector %(conn)s"),
+ {'vol': volume, 'conn': connector})
+
+ service_info = self._get_service_target(volume)
+ (ip, ipp, evs, port, _fs, tgtalias, secret) = service_info
+
+ try:
+ conn = self.backend.add_iscsi_conn(volume.name, _fs, port,
+ tgtalias,
+ connector['initiator'])
+
+ except processutils.ProcessExecutionError:
+ msg = (_("Error attaching volume %(vol)s. "
+ "Target limit might be reached!") % {'vol': volume.id})
+ raise exception.ISCSITargetAttachFailed(message=msg)
+
+ hnas_portal = ip + ':' + ipp
+ lu_id = six.text_type(conn['lu_id'])
+ fulliqn = conn['iqn']
+ tgt = (hnas_portal + ',' + tgtalias + ',' +
+ volume.provider_location + ',' + evs + ',' +
+ port + ',' + lu_id)
+
+ LOG.info(_LI("initiate: connection %s"), tgt)
+
+ properties = {}
+ properties['provider_location'] = tgt
+ properties['target_discovered'] = False
+ properties['target_portal'] = hnas_portal
+ properties['target_iqn'] = fulliqn
+ properties['target_lu'] = int(lu_id)
+ properties['volume_id'] = volume.id
+ properties['auth_username'] = connector['initiator']
+
+ if self.config['chap_enabled'] == 'True':
+ properties['auth_method'] = 'CHAP'
+ properties['auth_password'] = secret
+
+ conn_info = {'driver_volume_type': 'iscsi', 'data': properties}
+ LOG.debug("initialize_connection: conn_info: %(conn)s.",
+ {'conn': conn_info})
+
+ return conn_info
+
+ @cinder_utils.synchronized('volume_mapping')
+ def terminate_connection(self, volume, connector, **kwargs):
+ """Terminate a connection to a volume.
+
+ :param volume: dictionary volume reference
+ :param connector: dictionary connector reference
+ """
+ service_info = self._get_service_target(volume)
+ (ip, ipp, evs, port, fs, tgtalias, secret) = service_info
+ lu_info = self.backend.check_lu(volume.name, fs)
+
+ self.backend.del_iscsi_conn(evs, tgtalias, lu_info['id'])
+
+ LOG.info(_LI("terminate_connection: %(vol)s"),
+ {'vol': volume.provider_location})
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a volume from a snapshot.
+
+ :param volume: dictionary volume reference
+ :param snapshot: dictionary snapshot reference
+ :returns: the provider location of the snapshot
+ """
+ fs = self._get_service(volume)
+
+ self.backend.create_cloned_lu(snapshot.name, fs, volume.name)
+
+ LOG.info(_LI("LU %(lu)s of size %(sz)d MB is created."),
+ {'lu': snapshot.name, 'sz': snapshot.volume_size})
+
+ return {'provider_location': self._get_provider_location(snapshot)}
+
+ def create_snapshot(self, snapshot):
+ """Creates a snapshot.
+
+ :param snapshot: dictionary snapshot reference
+ :returns: the provider location of the snapshot
+ """
+ fs = self._get_service(snapshot.volume)
+
+ self.backend.create_cloned_lu(snapshot.volume_name, fs, snapshot.name)
+
+ LOG.debug("LU %(lu)s of size %(size)d GB is created.",
+ {'lu': snapshot.name, 'size': snapshot.volume_size})
+
+ return {'provider_location': self._get_provider_location(snapshot)}
+
+ def delete_snapshot(self, snapshot):
+ """Deletes a snapshot.
+
+ :param snapshot: dictionary snapshot reference
+ """
+ fs = self._get_service(snapshot.volume)
+ self.backend.delete_lu(fs, snapshot.name)
+
+ LOG.debug("Delete lu %(lu)s", {'lu': snapshot.name})
+
+ def get_volume_stats(self, refresh=False):
+ """Gets the volume driver stats.
+
+ :param refresh: if refresh is True, the driver_stats is updated
+ :returns: the driver stats
+ """
+ if refresh:
+ self.driver_stats = self._get_stats()
+
+ return self.driver_stats
+
def manage_existing_get_size(self, volume, existing_vol_ref):
"""Gets the size to manage_existing.
Returns the size of volume to be managed by manage_existing.
- :param volume: cinder volume to manage
+ :param volume: cinder volume to manage
:param existing_vol_ref: existing volume to take under management
+ :returns: the size of the volume to be managed or raises error
+ :raises: ManageExistingInvalidReference
"""
- # Check that the reference is valid.
+ # Check if the reference is valid.
if 'source-name' not in existing_vol_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_vol_ref, reason=reason)
- ref_name = existing_vol_ref['source-name']
- fs_label, vol_name = self._get_info_from_vol_ref(ref_name)
+ fs_label, vol_name = (
+ self._get_info_from_vol_ref(existing_vol_ref['source-name']))
LOG.debug("File System: %(fs_label)s "
"Volume name: %(vol_name)s.",
{'fs_label': fs_label, 'vol_name': vol_name})
- vol_name = "'{}'".format(vol_name)
+ if utils.check_already_managed_volume(vol_name):
+ raise exception.ManageExistingAlreadyManaged(volume_ref=vol_name)
- lu_info = self.bend.get_existing_lu_info(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- fs_label, vol_name)
+ lu_info = self.backend.get_existing_lu_info(vol_name, fs_label)
- if fs_label in lu_info:
- aux = lu_info.split('\n')[3]
- size = aux.split(':')[1]
- size_unit = size.split(' ')[2]
-
- if size_unit == 'TB':
- return int(size.split(' ')[1]) * units.k
- else:
- return int(size.split(' ')[1])
+ if lu_info != {}:
+ return lu_info['size']
else:
raise exception.ManageExistingInvalidReference(
existing_ref=existing_vol_ref,
@@ -966,54 +604,51 @@ class HDSISCSIDriver(driver.ISCSIDriver):
e.g., openstack/vol_to_manage
:param volume: cinder volume to manage
- :param existing_vol_ref: driver-specific information used to identify a
+ :param existing_vol_ref: driver specific information used to identify a
volume
+ :returns: the provider location of the volume managed
"""
- ref_name = existing_vol_ref['source-name']
- fs_label, vol_name = self._get_info_from_vol_ref(ref_name)
+ fs_label, vol_name = (
+ self._get_info_from_vol_ref(existing_vol_ref['source-name']))
LOG.debug("Asked to manage ISCSI volume %(vol)s, with vol "
- "ref %(ref)s.", {'vol': volume['id'],
+ "ref %(ref)s.", {'vol': volume.id,
'ref': existing_vol_ref['source-name']})
- self._check_pool_and_fs(volume, fs_label)
+ if volume.volume_type is not None:
+ self._check_pool_and_fs(volume, fs_label)
- vol_name = "'{}'".format(vol_name)
-
- self.bend.rename_existing_lu(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'], fs_label,
- volume['name'], vol_name)
+ self.backend.rename_existing_lu(fs_label, vol_name, volume.name)
LOG.info(_LI("Set newly managed Cinder volume name to %(name)s."),
- {'name': volume['name']})
+ {'name': volume.name})
- lun = self.arid + '.' + volume['name']
-
- return {'provider_location': lun}
+ return {'provider_location': self._get_provider_location(volume)}
def unmanage(self, volume):
"""Unmanages a volume from cinder.
Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object. A log entry
- will be made to notify the Admin that the volume is no longer being
+ will be made to notify the admin that the volume is no longer being
managed.
:param volume: cinder volume to unmanage
"""
- svc = self._get_service(volume)
+ fslabel = self._get_service(volume)
+ new_name = 'unmanage-' + volume.name
+ vol_path = fslabel + '/' + volume.name
- new_name = 'unmanage-' + volume['name']
- vol_path = svc + '/' + volume['name']
-
- self.bend.rename_existing_lu(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'], svc, new_name,
- volume['name'])
+ self.backend.rename_existing_lu(fslabel, volume.name, new_name)
LOG.info(_LI("Cinder ISCSI volume with current path %(path)s is "
"no longer being managed. The new name is %(unm)s."),
{'path': vol_path, 'unm': new_name})
+
+ def _get_provider_location(self, volume):
+ """Gets the provider location of a given volume
+
+ :param volume: dictionary volume reference
+ :returns: the provider_location related to the volume
+ """
+ return self.backend.get_version()['mac'] + '.' + volume.name
diff --git a/cinder/volume/drivers/hitachi/hnas_nfs.py b/cinder/volume/drivers/hitachi/hnas_nfs.py
index c79c89a9243..951c522ffc8 100644
--- a/cinder/volume/drivers/hitachi/hnas_nfs.py
+++ b/cinder/volume/drivers/hitachi/hnas_nfs.py
@@ -14,21 +14,18 @@
# under the License.
"""
-Volume driver for HDS HNAS NFS storage.
+Volume driver for HNAS NFS storage.
"""
import math
import os
-import re
-import six
import socket
-import time
-from xml.etree import ElementTree as ETree
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
+import six
from cinder import exception
from cinder.i18n import _, _LE, _LI
@@ -36,19 +33,19 @@ from cinder.image import image_utils
from cinder import interface
from cinder import utils as cutils
from cinder.volume.drivers.hitachi import hnas_backend
+from cinder.volume.drivers.hitachi import hnas_utils
from cinder.volume.drivers import nfs
from cinder.volume import utils
-from cinder.volume import volume_types
-HDS_HNAS_NFS_VERSION = '4.1.0'
+HNAS_NFS_VERSION = '5.0.0'
LOG = logging.getLogger(__name__)
NFS_OPTS = [
cfg.StrOpt('hds_hnas_nfs_config_file',
default='/opt/hds/hnas/cinder_nfs_conf.xml',
- help='Configuration file for HDS NFS cinder plugin'), ]
+ help='Configuration file for HNAS NFS cinder plugin'), ]
CONF = cfg.CONF
CONF.register_opts(NFS_OPTS)
@@ -56,151 +53,46 @@ CONF.register_opts(NFS_OPTS)
HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc', 'ssh_port': '22'}
-def _xml_read(root, element, check=None):
- """Read an xml element."""
-
- val = root.findtext(element)
-
- # mandatory parameter not found
- if val is None and check:
- raise exception.ParameterNotFound(param=element)
-
- # tag not found
- if val is None:
- return None
-
- svc_tag_pattern = re.compile("svc_.$")
- # tag found but empty parameter.
- if not val.strip():
- if svc_tag_pattern.search(element):
- return ""
- raise exception.ParameterNotFound(param=element)
-
- LOG.debug(_LI("%(element)s: %(val)s"),
- {'element': element,
- 'val': val if element != 'password' else '***'})
-
- return val.strip()
-
-
-def _read_config(xml_config_file):
- """Read hds driver specific xml config file.
-
- :param xml_config_file: string filename containing XML configuration
- """
-
- if not os.access(xml_config_file, os.R_OK):
- msg = (_("Can't open config file: %s") % xml_config_file)
- raise exception.NotFound(message=msg)
-
- try:
- root = ETree.parse(xml_config_file).getroot()
- except Exception:
- msg = (_("Error parsing config file: %s") % xml_config_file)
- raise exception.ConfigNotFound(message=msg)
-
- # mandatory parameters
- config = {}
- arg_prereqs = ['mgmt_ip0', 'username']
- for req in arg_prereqs:
- config[req] = _xml_read(root, req, True)
-
- # optional parameters
- opt_parameters = ['hnas_cmd', 'ssh_enabled', 'cluster_admin_ip0']
- for req in opt_parameters:
- config[req] = _xml_read(root, req)
-
- if config['ssh_enabled'] == 'True':
- config['ssh_private_key'] = _xml_read(root, 'ssh_private_key', True)
- config['password'] = _xml_read(root, 'password')
- config['ssh_port'] = _xml_read(root, 'ssh_port')
- if config['ssh_port'] is None:
- config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port']
- else:
- # password is mandatory when not using SSH
- config['password'] = _xml_read(root, 'password', True)
-
- if config['hnas_cmd'] is None:
- config['hnas_cmd'] = HNAS_DEFAULT_CONFIG['hnas_cmd']
-
- config['hdp'] = {}
- config['services'] = {}
-
- # min one needed
- for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:
- if _xml_read(root, svc) is None:
- continue
- service = {'label': svc}
-
- # none optional
- for arg in ['volume_type', 'hdp']:
- service[arg] = _xml_read(root, svc + '/' + arg, True)
- config['services'][service['volume_type']] = service
- config['hdp'][service['hdp']] = service['hdp']
-
- # at least one service required!
- if config['services'].keys() is None:
- raise exception.ParameterNotFound(param="No service found")
-
- return config
-
-
-def factory_bend(drv_config):
- """Factory over-ride in self-tests."""
-
- return hnas_backend.HnasBackend(drv_config)
-
-
@interface.volumedriver
-class HDSNFSDriver(nfs.NfsDriver):
+class HNASNFSDriver(nfs.NfsDriver):
"""Base class for Hitachi NFS driver.
Executes commands relating to Volumes.
- .. code-block:: none
+ Version history:
+
+ .. code-block:: none
Version 1.0.0: Initial driver version
Version 2.2.0: Added support to SSH authentication
Version 3.0.0: Added pool aware scheduling
Version 4.0.0: Added manage/unmanage features
Version 4.1.0: Fixed XML parser checks on blank options
+ Version 5.0.0: Remove looping in driver initialization
+ Code cleaning up
+ New communication interface between the driver and HNAS
+ Removed the option to use local SSC (ssh_enabled=False)
+ Updated to use versioned objects
+ Changed the class name to HNASNFSDriver
"""
def __init__(self, *args, **kwargs):
- # NOTE(vish): db is set by Manager
self._execute = None
self.context = None
self.configuration = kwargs.get('configuration', None)
+ service_parameters = ['volume_type', 'hdp']
+ optional_parameters = ['hnas_cmd', 'cluster_admin_ip0']
+
if self.configuration:
self.configuration.append_config_values(NFS_OPTS)
- self.config = _read_config(
- self.configuration.hds_hnas_nfs_config_file)
+ self.config = hnas_utils.read_config(
+ self.configuration.hds_hnas_nfs_config_file,
+ service_parameters,
+ optional_parameters)
- super(HDSNFSDriver, self).__init__(*args, **kwargs)
- self.bend = factory_bend(self.config)
-
- def _array_info_get(self):
- """Get array parameters."""
-
- out = self.bend.get_version(self.config['hnas_cmd'],
- HDS_HNAS_NFS_VERSION,
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'])
-
- inf = out.split()
- return inf[1], 'nfs_' + inf[1], inf[6]
-
- def _id_to_vol(self, volume_id):
- """Given the volume id, retrieve the volume object from database.
-
- :param volume_id: string volume id
- """
-
- vol = self.db.volume_get(self.context, volume_id)
-
- return vol
+ super(HNASNFSDriver, self).__init__(*args, **kwargs)
+ self.backend = hnas_backend.HNASSSHBackend(self.config)
def _get_service(self, volume):
"""Get service parameters.
@@ -209,21 +101,24 @@ class HDSNFSDriver(nfs.NfsDriver):
its type.
:param volume: dictionary volume reference
+ :returns: Tuple containing the service parameters (label,
+ export path and export file system) or error if no configuration is
+ found.
+ :raises: ParameterNotFound
"""
-
- LOG.debug("_get_service: volume: %s", volume)
- label = utils.extract_host(volume['host'], level='pool')
+ LOG.debug("_get_service: volume: %(vol)s", {'vol': volume})
+ label = utils.extract_host(volume.host, level='pool')
if label in self.config['services'].keys():
svc = self.config['services'][label]
- LOG.info(_LI("Get service: %(lbl)s->%(svc)s"),
- {'lbl': label, 'svc': svc['fslabel']})
- service = (svc['hdp'], svc['path'], svc['fslabel'])
+ LOG.info(_LI("_get_service: %(lbl)s->%(svc)s"),
+ {'lbl': label, 'svc': svc['export']['fs']})
+ service = (svc['hdp'], svc['export']['path'], svc['export']['fs'])
else:
- LOG.info(_LI("Available services: %s"),
- self.config['services'].keys())
- LOG.error(_LE("No configuration found for service: %s"),
- label)
+ LOG.info(_LI("Available services: %(svc)s"),
+ {'svc': self.config['services'].keys()})
+ LOG.error(_LE("No configuration found for service: %(lbl)s"),
+ {'lbl': label})
raise exception.ParameterNotFound(param=label)
return service
@@ -233,30 +128,26 @@ class HDSNFSDriver(nfs.NfsDriver):
:param volume: dictionary volume reference
:param new_size: int size in GB to extend
+ :raises: InvalidResults
"""
-
- nfs_mount = self._get_provider_location(volume['id'])
- path = self._get_volume_path(nfs_mount, volume['name'])
+ nfs_mount = volume.provider_location
+ path = self._get_volume_path(nfs_mount, volume.name)
# Resize the image file on share to new size.
LOG.debug("Checking file for resize")
- if self._is_file_size_equal(path, new_size):
- return
- else:
- LOG.info(_LI("Resizing file to %sG"), new_size)
+ if not self._is_file_size_equal(path, new_size):
+ LOG.info(_LI("Resizing file to %(sz)sG"), {'sz': new_size})
image_utils.resize_image(path, new_size)
- if self._is_file_size_equal(path, new_size):
- LOG.info(_LI("LUN %(id)s extended to %(size)s GB."),
- {'id': volume['id'], 'size': new_size})
- return
- else:
- raise exception.InvalidResults(
- _("Resizing image file failed."))
+
+ if self._is_file_size_equal(path, new_size):
+ LOG.info(_LI("LUN %(id)s extended to %(size)s GB."),
+ {'id': volume.id, 'size': new_size})
+ else:
+ raise exception.InvalidResults(_("Resizing image file failed."))
def _is_file_size_equal(self, path, size):
"""Checks if file size at path is equal to size."""
-
data = image_utils.qemu_img_info(path)
virt_size = data.virtual_size / units.Gi
@@ -266,22 +157,16 @@ class HDSNFSDriver(nfs.NfsDriver):
return False
def create_volume_from_snapshot(self, volume, snapshot):
- """Creates a volume from a snapshot."""
+ """Creates a volume from a snapshot.
- LOG.debug("create_volume_from %s", volume)
- vol_size = volume['size']
- snap_size = snapshot['volume_size']
+ :param volume: volume to be created
+ :param snapshot: source snapshot
+ :returns: the provider_location of the volume created
+ """
+ LOG.debug("create_volume_from %(vol)s", {'vol': volume})
- if vol_size != snap_size:
- msg = _("Cannot create volume of size %(vol_size)s from "
- "snapshot of size %(snap_size)s")
- msg_fmt = {'vol_size': vol_size, 'snap_size': snap_size}
- raise exception.CinderException(msg % msg_fmt)
-
- self._clone_volume(snapshot['name'],
- volume['name'],
- snapshot['volume_id'])
- share = self._get_volume_location(snapshot['volume_id'])
+ self._clone_volume(snapshot.volume, volume.name, snapshot.name)
+ share = snapshot.volume.provider_location
return {'provider_location': share}
@@ -289,13 +174,12 @@ class HDSNFSDriver(nfs.NfsDriver):
"""Create a snapshot.
:param snapshot: dictionary snapshot reference
+ :returns: the provider_location of the snapshot created
"""
+ self._clone_volume(snapshot.volume, snapshot.name)
- self._clone_volume(snapshot['volume_name'],
- snapshot['name'],
- snapshot['volume_id'])
- share = self._get_volume_location(snapshot['volume_id'])
- LOG.debug('Share: %s', share)
+ share = snapshot.volume.provider_location
+ LOG.debug('Share: %(shr)s', {'shr': share})
# returns the mount point (not path)
return {'provider_location': share}
@@ -306,133 +190,81 @@ class HDSNFSDriver(nfs.NfsDriver):
:param snapshot: dictionary snapshot reference
"""
- nfs_mount = self._get_provider_location(snapshot['volume_id'])
+ nfs_mount = snapshot.volume.provider_location
- if self._volume_not_present(nfs_mount, snapshot['name']):
+ if self._volume_not_present(nfs_mount, snapshot.name):
return True
- self._execute('rm', self._get_volume_path(nfs_mount, snapshot['name']),
+ self._execute('rm', self._get_volume_path(nfs_mount, snapshot.name),
run_as_root=True)
- def _get_volume_location(self, volume_id):
- """Returns NFS mount address as :.
-
- :param volume_id: string volume id
- """
-
- nfs_server_ip = self._get_host_ip(volume_id)
- export_path = self._get_export_path(volume_id)
-
- return nfs_server_ip + ':' + export_path
-
- def _get_provider_location(self, volume_id):
- """Returns provider location for given volume.
-
- :param volume_id: string volume id
- """
-
- volume = self.db.volume_get(self.context, volume_id)
-
- # same format as _get_volume_location
- return volume.provider_location
-
- def _get_host_ip(self, volume_id):
- """Returns IP address for the given volume.
-
- :param volume_id: string volume id
- """
-
- return self._get_provider_location(volume_id).split(':')[0]
-
- def _get_export_path(self, volume_id):
- """Returns NFS export path for the given volume.
-
- :param volume_id: string volume id
- """
-
- return self._get_provider_location(volume_id).split(':')[1]
-
def _volume_not_present(self, nfs_mount, volume_name):
- """Check if volume exists.
+ """Check if volume does not exist.
+ :param nfs_mount: string path of the nfs share
:param volume_name: string volume name
+ :returns: boolean (true for volume not present and false otherwise)
"""
-
try:
- self._try_execute('ls', self._get_volume_path(nfs_mount,
- volume_name))
+ self._try_execute('ls',
+ self._get_volume_path(nfs_mount, volume_name))
except processutils.ProcessExecutionError:
# If the volume isn't present
return True
return False
- def _try_execute(self, *command, **kwargs):
- # NOTE(vish): Volume commands can partially fail due to timing, but
- # running them a second time on failure will usually
- # recover nicely.
- tries = 0
- while True:
- try:
- self._execute(*command, **kwargs)
- return True
- except processutils.ProcessExecutionError:
- tries += 1
- if tries >= self.configuration.num_shell_tries:
- raise
- LOG.exception(_LE("Recovering from a failed execute. "
- "Try number %s"), tries)
- time.sleep(tries ** 2)
-
def _get_volume_path(self, nfs_share, volume_name):
"""Get volume path (local fs path) for given name on given nfs share.
:param nfs_share string, example 172.18.194.100:/var/nfs
:param volume_name string,
- example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
+ example volume-91ee65ec-c473-4391-8c09-162b00c68a8c
+ :returns: the local path according to the parameters
"""
-
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume_name)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume.
- :param volume: dictionary volume reference
- :param src_vref: dictionary src_vref reference
+ :param volume: reference to the volume being created
+ :param src_vref: reference to the source volume
+ :returns: the provider_location of the cloned volume
"""
+ vol_size = volume.size
+ src_vol_size = src_vref.size
- vol_size = volume['size']
- src_vol_size = src_vref['size']
+ self._clone_volume(src_vref, volume.name, src_vref.name)
- if vol_size < src_vol_size:
- msg = _("Cannot create clone of size %(vol_size)s from "
- "volume of size %(src_vol_size)s")
- msg_fmt = {'vol_size': vol_size, 'src_vol_size': src_vol_size}
- raise exception.CinderException(msg % msg_fmt)
-
- self._clone_volume(src_vref['name'], volume['name'], src_vref['id'])
+ share = src_vref.provider_location
if vol_size > src_vol_size:
+ volume.provider_location = share
self.extend_volume(volume, vol_size)
- share = self._get_volume_location(src_vref['id'])
-
return {'provider_location': share}
def get_volume_stats(self, refresh=False):
"""Get volume stats.
- if 'refresh' is True, update the stats first.
+ :param refresh: if it is True, update the stats first.
+ :returns: dictionary with the stats from HNAS
+ _stats['pools']={
+ 'total_capacity_gb': total size of the pool,
+ 'free_capacity_gb': the available size,
+ 'allocated_capacity_gb': current allocated size,
+ 'QoS_support': bool to indicate if QoS is supported,
+ 'reserved_percentage': percentage of size reserved
+ }
"""
-
- _stats = super(HDSNFSDriver, self).get_volume_stats(refresh)
- _stats["vendor_name"] = 'HDS'
- _stats["driver_version"] = HDS_HNAS_NFS_VERSION
+ _stats = super(HNASNFSDriver, self).get_volume_stats(refresh)
+ _stats["vendor_name"] = 'Hitachi'
+ _stats["driver_version"] = HNAS_NFS_VERSION
_stats["storage_protocol"] = 'NFS'
for pool in self.pools:
- capacity, free, used = self._get_capacity_info(pool['hdp'])
+ capacity, free, used = self._get_capacity_info(pool['fs'])
pool['total_capacity_gb'] = capacity / float(units.Gi)
pool['free_capacity_gb'] = free / float(units.Gi)
pool['allocated_capacity_gb'] = used / float(units.Gi)
@@ -441,79 +273,61 @@ class HDSNFSDriver(nfs.NfsDriver):
_stats['pools'] = self.pools
- LOG.info(_LI('Driver stats: %s'), _stats)
+ LOG.info(_LI('Driver stats: %(stat)s'), {'stat': _stats})
return _stats
- def _get_nfs_info(self):
- out = self.bend.get_nfs_info(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'])
- lines = out.split('\n')
-
- # dict based on NFS exports addresses
- conf = {}
- for line in lines:
- if 'Export' in line:
- inf = line.split()
- (export, path, fslabel, hdp, ip1) = \
- inf[1], inf[3], inf[5], inf[7], inf[11]
- # 9, 10, etc are IP addrs
- key = ip1 + ':' + export
- conf[key] = {}
- conf[key]['path'] = path
- conf[key]['hdp'] = hdp
- conf[key]['fslabel'] = fslabel
- LOG.info(_LI("nfs_info: %(key)s: %(path)s, HDP: %(fslabel)s "
- "FSID: %(hdp)s"),
- {'key': key, 'path': path,
- 'fslabel': fslabel, 'hdp': hdp})
-
- return conf
-
def do_setup(self, context):
"""Perform internal driver setup."""
+ version_info = self.backend.get_version()
+ LOG.info(_LI("HNAS Array NFS driver"))
+ LOG.info(_LI("HNAS model: %s"), version_info['model'])
+ LOG.info(_LI("HNAS version: %s"), version_info['version'])
+ LOG.info(_LI("HNAS hardware: %s"), version_info['hardware'])
+ LOG.info(_LI("HNAS S/N: %s"), version_info['serial'])
self.context = context
- self._load_shares_config(getattr(self.configuration,
- self.driver_prefix +
- '_shares_config'))
- LOG.info(_LI("Review shares: %s"), self.shares)
+ self._load_shares_config(
+ getattr(self.configuration, self.driver_prefix + '_shares_config'))
+ LOG.info(_LI("Review shares: %(shr)s"), {'shr': self.shares})
- nfs_info = self._get_nfs_info()
+ elist = self.backend.get_export_list()
- LOG.debug("nfs_info: %s", nfs_info)
+ # Check for all configured exports
+ for svc_name, svc_info in self.config['services'].items():
+ server_ip = svc_info['hdp'].split(':')[0]
+ mountpoint = svc_info['hdp'].split(':')[1]
- for share in self.shares:
- if share in nfs_info.keys():
- LOG.info(_LI("share: %(share)s -> %(info)s"),
- {'share': share, 'info': nfs_info[share]['path']})
+ # Ensure export are configured in HNAS
+ export_configured = False
+ for export in elist:
+ if mountpoint == export['name'] and server_ip in export['evs']:
+ svc_info['export'] = export
+ export_configured = True
- for svc in self.config['services'].keys():
- if share == self.config['services'][svc]['hdp']:
- self.config['services'][svc]['path'] = \
- nfs_info[share]['path']
- # don't overwrite HDP value
- self.config['services'][svc]['fsid'] = \
- nfs_info[share]['hdp']
- self.config['services'][svc]['fslabel'] = \
- nfs_info[share]['fslabel']
- LOG.info(_LI("Save service info for"
- " %(svc)s -> %(hdp)s, %(path)s"),
- {'svc': svc, 'hdp': nfs_info[share]['hdp'],
- 'path': nfs_info[share]['path']})
- break
- if share != self.config['services'][svc]['hdp']:
- LOG.error(_LE("NFS share %(share)s has no service entry:"
- " %(svc)s -> %(hdp)s"),
- {'share': share, 'svc': svc,
- 'hdp': self.config['services'][svc]['hdp']})
- raise exception.ParameterNotFound(param=svc)
- else:
- LOG.info(_LI("share: %s incorrect entry"), share)
+ # Ensure export are reachable
+ try:
+ out, err = self._execute('showmount', '-e', server_ip)
+ except processutils.ProcessExecutionError:
+ LOG.error(_LE("NFS server %(srv)s not reachable!"),
+ {'srv': server_ip})
+ raise
- LOG.debug("self.config['services'] = %s", self.config['services'])
+ export_list = out.split('\n')[1:]
+ export_list.pop()
+ mountpoint_not_found = mountpoint not in map(
+ lambda x: x.split()[0], export_list)
+ if (len(export_list) < 1 or
+ mountpoint_not_found or
+ not export_configured):
+ LOG.error(_LE("Configured share %(share)s is not present"
+ "in %(srv)s."),
+ {'share': mountpoint, 'srv': server_ip})
+ msg = _('Section: %s') % svc_name
+ raise exception.InvalidParameterValue(err=msg)
+
+ LOG.debug("Loading services: %(svc)s", {
+ 'svc': self.config['services']})
service_list = self.config['services'].keys()
for svc in service_list:
@@ -521,74 +335,57 @@ class HDSNFSDriver(nfs.NfsDriver):
pool = {}
pool['pool_name'] = svc['volume_type']
pool['service_label'] = svc['volume_type']
- pool['hdp'] = svc['hdp']
+ pool['fs'] = svc['hdp']
self.pools.append(pool)
- LOG.info(_LI("Configured pools: %s"), self.pools)
+ LOG.info(_LI("Configured pools: %(pool)s"), {'pool': self.pools})
- def _clone_volume(self, volume_name, clone_name, volume_id):
+ def _clone_volume(self, src_vol, clone_name, src_name=None):
"""Clones mounted volume using the HNAS file_clone.
- :param volume_name: string volume name
+ :param src_vol: object source volume
:param clone_name: string clone name (or snapshot)
- :param volume_id: string volume id
+ :param src_name: name of the source volume.
"""
- export_path = self._get_export_path(volume_id)
+ # when the source is a snapshot, we need to pass the source name and
+ # use the information of the volume that originated the snapshot to
+ # get the clone path.
+ if not src_name:
+ src_name = src_vol.name
+
# volume-ID snapshot-ID, /cinder
- LOG.info(_LI("Cloning with volume_name %(vname)s clone_name %(cname)s"
- " export_path %(epath)s"), {'vname': volume_name,
- 'cname': clone_name,
- 'epath': export_path})
+ LOG.info(_LI("Cloning with volume_name %(vname)s, clone_name %(cname)s"
+                     ", export_path %(epath)s"),
+ {'vname': src_name, 'cname': clone_name,
+ 'epath': src_vol.provider_location})
- source_vol = self._id_to_vol(volume_id)
- # sps; added target
- (_hdp, _path, _fslabel) = self._get_service(source_vol)
- target_path = '%s/%s' % (_path, clone_name)
- source_path = '%s/%s' % (_path, volume_name)
- out = self.bend.file_clone(self.config['hnas_cmd'],
- self.config['mgmt_ip0'],
- self.config['username'],
- self.config['password'],
- _fslabel, source_path, target_path)
+ (fs, path, fs_label) = self._get_service(src_vol)
- return out
+ target_path = '%s/%s' % (path, clone_name)
+ source_path = '%s/%s' % (path, src_name)
- def get_pool(self, volume):
- if not volume['volume_type']:
- return 'default'
- else:
- metadata = {}
- type_id = volume['volume_type_id']
- if type_id is not None:
- metadata = volume_types.get_volume_type_extra_specs(type_id)
- if not metadata.get('service_label'):
- return 'default'
- else:
- if metadata['service_label'] not in \
- self.config['services'].keys():
- return 'default'
- else:
- return metadata['service_label']
+ self.backend.file_clone(fs_label, source_path, target_path)
def create_volume(self, volume):
"""Creates a volume.
:param volume: volume reference
+ :returns: the volume provider_location
"""
self._ensure_shares_mounted()
- (_hdp, _path, _fslabel) = self._get_service(volume)
+ (fs_id, path, fslabel) = self._get_service(volume)
- volume['provider_location'] = _hdp
+ volume.provider_location = fs_id
LOG.info(_LI("Volume service: %(label)s. Casted to: %(loc)s"),
- {'label': _fslabel, 'loc': volume['provider_location']})
+ {'label': fslabel, 'loc': volume.provider_location})
self._do_create_volume(volume)
- return {'provider_location': volume['provider_location']}
+ return {'provider_location': fs_id}
def _convert_vol_ref_share_name_to_share_ip(self, vol_ref):
"""Converts the share point name to an IP address.
@@ -596,8 +393,10 @@ class HDSNFSDriver(nfs.NfsDriver):
The volume reference may have a DNS name portion in the share name.
Convert that to an IP address and then restore the entire path.
- :param vol_ref: driver-specific information used to identify a volume
- :returns: a volume reference where share is in IP format
+ :param vol_ref: driver-specific information used to identify a volume
+ :returns: a volume reference where share is in IP format or raises
+ error
+        :raises: socket.gaierror
"""
# First strip out share and convert to IP format.
@@ -608,7 +407,7 @@ class HDSNFSDriver(nfs.NfsDriver):
except socket.gaierror as e:
LOG.error(_LE('Invalid hostname %(host)s'),
{'host': share_split[0]})
- LOG.debug('error: %s', e.strerror)
+ LOG.debug('error: %(err)s', {'err': e.strerror})
raise
# Now place back into volume reference.
@@ -624,7 +423,8 @@ class HDSNFSDriver(nfs.NfsDriver):
if unsuccessful.
:param vol_ref: driver-specific information used to identify a volume
- :returns: NFS Share, NFS mount, volume path or raise error
+ :returns: NFS Share, NFS mount, volume path or raise error
+ :raises: ManageExistingInvalidReference
"""
# Check that the reference is valid.
if 'source-name' not in vol_ref:
@@ -677,30 +477,34 @@ class HDSNFSDriver(nfs.NfsDriver):
e.g., 10.10.32.1:/openstack/vol_to_manage
or 10.10.32.1:/openstack/some_directory/vol_to_manage
- :param volume: cinder volume to manage
+ :param volume: cinder volume to manage
:param existing_vol_ref: driver-specific information used to identify a
- volume
+ volume
+ :returns: the provider location
+ :raises: VolumeBackendAPIException
"""
# Attempt to find NFS share, NFS mount, and volume path from vol_ref.
- (nfs_share, nfs_mount, vol_path
+ (nfs_share, nfs_mount, vol_name
) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)
LOG.debug("Asked to manage NFS volume %(vol)s, with vol ref %(ref)s.",
- {'vol': volume['id'],
+ {'vol': volume.id,
'ref': existing_vol_ref['source-name']})
+
self._check_pool_and_share(volume, nfs_share)
- if vol_path == volume['name']:
- LOG.debug("New Cinder volume %s name matches reference name: "
- "no need to rename.", volume['name'])
+
+ if vol_name == volume.name:
+ LOG.debug("New Cinder volume %(vol)s name matches reference name: "
+ "no need to rename.", {'vol': volume.name})
else:
- src_vol = os.path.join(nfs_mount, vol_path)
- dst_vol = os.path.join(nfs_mount, volume['name'])
+ src_vol = os.path.join(nfs_mount, vol_name)
+ dst_vol = os.path.join(nfs_mount, volume.name)
try:
- self._execute("mv", src_vol, dst_vol, run_as_root=False,
- check_exit_code=True)
- LOG.debug("Setting newly managed Cinder volume name to %s.",
- volume['name'])
+ self._try_execute("mv", src_vol, dst_vol, run_as_root=False,
+ check_exit_code=True)
+ LOG.debug("Setting newly managed Cinder volume name "
+ "to %(vol)s.", {'vol': volume.name})
self._set_rw_permissions_for_all(dst_vol)
except (OSError, processutils.ProcessExecutionError) as err:
exception_msg = (_("Failed to manage existing volume "
@@ -718,20 +522,20 @@ class HDSNFSDriver(nfs.NfsDriver):
one passed in the volume reference. Also, checks if the pool
for the volume type matches the pool for the host passed.
- :param volume: cinder volume reference
+ :param volume: cinder volume reference
:param nfs_share: NFS share passed to manage
+ :raises: ManageExistingVolumeTypeMismatch
"""
- pool_from_vol_type = self.get_pool(volume)
+ pool_from_vol_type = hnas_utils.get_pool(self.config, volume)
- pool_from_host = utils.extract_host(volume['host'], level='pool')
+ pool_from_host = utils.extract_host(volume.host, level='pool')
if self.config['services'][pool_from_vol_type]['hdp'] != nfs_share:
msg = (_("Failed to manage existing volume because the pool of "
- "the volume type chosen does not match the NFS share "
+ "the volume type chosen does not match the NFS share "
"passed in the volume reference."),
- {'Share passed': nfs_share,
- 'Share for volume type':
- self.config['services'][pool_from_vol_type]['hdp']})
+ {'Share passed': nfs_share, 'Share for volume type':
+ self.config['services'][pool_from_vol_type]['hdp']})
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if pool_from_host != pool_from_vol_type:
@@ -739,7 +543,7 @@ class HDSNFSDriver(nfs.NfsDriver):
"the volume type chosen does not match the pool of "
"the host."),
{'Pool of the volume type': pool_from_vol_type,
- 'Pool of the host': pool_from_host})
+ 'Pool of the host': pool_from_host})
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
def manage_existing_get_size(self, volume, existing_vol_ref):
@@ -747,19 +551,24 @@ class HDSNFSDriver(nfs.NfsDriver):
When calculating the size, round up to the next GB.
- :param volume: cinder volume to manage
+ :param volume: cinder volume to manage
:param existing_vol_ref: existing volume to take under management
+ :returns: the size of the volume or raise error
+ :raises: VolumeBackendAPIException
"""
# Attempt to find NFS share, NFS mount, and volume path from vol_ref.
- (nfs_share, nfs_mount, vol_path
+ (nfs_share, nfs_mount, vol_name
) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)
- try:
- LOG.debug("Asked to get size of NFS vol_ref %s.",
- existing_vol_ref['source-name'])
+ LOG.debug("Asked to get size of NFS vol_ref %(ref)s.",
+ {'ref': existing_vol_ref['source-name']})
- file_path = os.path.join(nfs_mount, vol_path)
+ if utils.check_already_managed_volume(vol_name):
+ raise exception.ManageExistingAlreadyManaged(volume_ref=vol_name)
+
+ try:
+ file_path = os.path.join(nfs_mount, vol_name)
file_size = float(cutils.get_file_size(file_path)) / units.Gi
vol_size = int(math.ceil(file_size))
except (OSError, ValueError):
@@ -783,8 +592,8 @@ class HDSNFSDriver(nfs.NfsDriver):
:param volume: cinder volume to unmanage
"""
- vol_str = CONF.volume_name_template % volume['id']
- path = self._get_mount_point_for_share(volume['provider_location'])
+ vol_str = CONF.volume_name_template % volume.id
+ path = self._get_mount_point_for_share(volume.provider_location)
new_str = "unmanage-" + vol_str
@@ -792,8 +601,8 @@ class HDSNFSDriver(nfs.NfsDriver):
new_path = os.path.join(path, new_str)
try:
- self._execute("mv", vol_path, new_path,
- run_as_root=False, check_exit_code=True)
+ self._try_execute("mv", vol_path, new_path,
+ run_as_root=False, check_exit_code=True)
LOG.info(_LI("Cinder NFS volume with current path %(cr)s is "
"no longer being managed."), {'cr': new_path})
diff --git a/cinder/volume/drivers/hitachi/hnas_utils.py b/cinder/volume/drivers/hitachi/hnas_utils.py
new file mode 100644
index 00000000000..1a1ec40c24e
--- /dev/null
+++ b/cinder/volume/drivers/hitachi/hnas_utils.py
@@ -0,0 +1,152 @@
+# Copyright (c) 2016 Hitachi Data Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Shared code for HNAS drivers
+"""
+
+import os
+import re
+
+from oslo_log import log as logging
+from xml.etree import ElementTree as ETree
+
+from cinder import exception
+from cinder.i18n import _, _LI
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+
+HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc',
+ 'chap_enabled': 'True',
+ 'ssh_port': '22'}
+
+MAX_HNAS_ISCSI_TARGETS = 32
+
+
+def _xml_read(root, element, check=None):
+ """Read an xml element.
+
+ :param root: XML object
+ :param element: string desired tag
+ :param check: string if present, throw exception if element missing
+ """
+
+ val = root.findtext(element)
+
+ # mandatory parameter not found
+ if val is None and check:
+ raise exception.ParameterNotFound(param=element)
+
+ # tag not found
+ if val is None:
+ return None
+
+ svc_tag_pattern = re.compile("svc_[0-3]$")
+ # tag found but empty parameter.
+ if not val.strip():
+ if svc_tag_pattern.search(element):
+ return ""
+ raise exception.ParameterNotFound(param=element)
+
+    LOG.debug("%(element)s: %(val)s",
+ {'element': element,
+ 'val': val if element != 'password' else '***'})
+
+ return val.strip()
+
+
+def read_config(xml_config_file, svc_params, optional_params):
+ """Read Hitachi driver specific xml config file.
+
+ :param xml_config_file: string filename containing XML configuration
+ :param svc_params: parameters to configure the services
+ ['volume_type', 'hdp', 'iscsi_ip']
+ :param optional_params: parameters to configure that are not mandatory
+ ['hnas_cmd', 'ssh_enabled', 'cluster_admin_ip0', 'chap_enabled']
+ """
+
+ if not os.access(xml_config_file, os.R_OK):
+ msg = (_("Can't open config file: %s") % xml_config_file)
+ raise exception.NotFound(message=msg)
+
+ try:
+ root = ETree.parse(xml_config_file).getroot()
+ except ETree.ParseError:
+ msg = (_("Error parsing config file: %s") % xml_config_file)
+ raise exception.ConfigNotFound(message=msg)
+
+ # mandatory parameters for NFS and iSCSI
+ config = {}
+ arg_prereqs = ['mgmt_ip0', 'username']
+ for req in arg_prereqs:
+ config[req] = _xml_read(root, req, 'check')
+
+ # optional parameters for NFS and iSCSI
+ for req in optional_params:
+ config[req] = _xml_read(root, req)
+ if config[req] is None and HNAS_DEFAULT_CONFIG.get(req) is not None:
+ config[req] = HNAS_DEFAULT_CONFIG.get(req)
+
+ config['ssh_private_key'] = _xml_read(root, 'ssh_private_key')
+ config['password'] = _xml_read(root, 'password')
+
+ if config['ssh_private_key'] is None and config['password'] is None:
+        msg = (_("Missing authentication option (password or private "
+                 "key file)."))
+ raise exception.ConfigNotFound(message=msg)
+
+ config['ssh_port'] = _xml_read(root, 'ssh_port')
+ if config['ssh_port'] is None:
+ config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port']
+
+ config['fs'] = {}
+ config['services'] = {}
+
+ # min one needed
+ for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:
+ if _xml_read(root, svc) is None:
+ continue
+ service = {'label': svc}
+
+ # none optional
+ for arg in svc_params:
+ service[arg] = _xml_read(root, svc + '/' + arg, 'check')
+ config['services'][service['volume_type']] = service
+ config['fs'][service['hdp']] = service['hdp']
+
+ # at least one service required!
+ if not config['services'].keys():
+ msg = (_("svc_0"))
+ raise exception.ParameterNotFound(param=msg)
+
+ return config
+
+
+def get_pool(config, volume):
+ """Get the pool of a volume.
+
+ :param config: dictionary containing the configuration parameters
+ :param volume: dictionary volume reference
+ :returns: the pool related to the volume
+ """
+ if volume.volume_type:
+ metadata = {}
+ type_id = volume.volume_type_id
+ if type_id is not None:
+ metadata = volume_types.get_volume_type_extra_specs(type_id)
+ if metadata.get('service_label'):
+ if metadata['service_label'] in config['services'].keys():
+ return metadata['service_label']
+ return 'default'
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 29951dbcf05..9ce2c66267d 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -140,9 +140,13 @@ MAPPING = {
'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver',
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
- 'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver',
+ 'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
- 'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver',
+ 'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
+ 'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver':
+ 'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
+ 'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver':
+ 'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver':
'cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver',
'cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver':
diff --git a/releasenotes/notes/hnas-drivers-refactoring-9dbe297ffecced21.yaml b/releasenotes/notes/hnas-drivers-refactoring-9dbe297ffecced21.yaml
new file mode 100644
index 00000000000..6ae57511234
--- /dev/null
+++ b/releasenotes/notes/hnas-drivers-refactoring-9dbe297ffecced21.yaml
@@ -0,0 +1,7 @@
+upgrade:
+ - HNAS drivers have new configuration paths. Users should now use
+ ``cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver`` for HNAS NFS driver
+ and ``cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver`` for HNAS
+ iSCSI driver.
+deprecations:
+ - The old HNAS drivers configuration paths have been marked for deprecation.