diff --git a/cinder/tests/unit/volume/drivers/nec/__init__.py b/cinder/tests/unit/volume/drivers/nec/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cinder/tests/unit/volume/drivers/nec/cli_test.py b/cinder/tests/unit/volume/drivers/nec/cli_test.py
new file mode 100644
index 00000000000..09bee78131e
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/nec/cli_test.py
@@ -0,0 +1,435 @@
+#
+# Copyright (c) 2016 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class MStorageISMCLI(object):
+ def __init__(self, properties):
+ super(MStorageISMCLI, self).__init__()
+
+ self._properties = properties
+
+ def view_all(self, conf_ismview_path=None, delete_ismview=True,
+ cmd_lock=True):
+ # Canned iSMview XML response consumed by the unit tests. The XML
+ # body (Disk Array, Pool, Logical Disk, Access Control and Host
+ # Port chapters) is elided; only the trailing status text remains.
+ out = '''
+ Command Completed Successfully!!
+ 0
+ '''
+ return out
+
+ def ldbind(self, name, pool, ldn, size):
+ return True, 0
+
+ def unbind(self, name):
+ pass
+
+ def expand(self, ldn, capacity):
+ pass
+
+ def addldsetld(self, ldset, ldname, lun=None):
+ pass
+
+ def delldsetld(self, ldset, ldname):
+ if ldname == "LX:287RbQoP7VdwR1WsPC2fZT":
+ return False, 'iSM31064'
+ else:
+ return True, None
+
+ def changeldname(self, ldn, new_name, old_name=None):
+ pass
+
+ def setpair(self, mvname, rvname):
+ pass
+
+ def unpair(self, mvname, rvname, flag):
+ pass
+
+ def replicate(self, mvname, rvname, flag):
+ pass
+
+ def separate(self, mvname, rvname, flag):
+ pass
+
+ def query_MV_RV_status(self, ldname, rpltype):
+ return 'separated'
+
+ def query_MV_RV_name(self, ldname, rpltype):
+ pass
+
+ def query_MV_RV_diff(self, ldname, rpltype):
+ pass
+
+ def backup_restore(self, volume_properties, unpairWait, canPairing=True):
+ pass
+
+ def check_ld_existed_rplstatus(self, lds, ldname, snapshot, flag):
+ return {'ld_capacity': 10}
+
+ def get_pair_lds(self, ldname, lds):
+ return {'0004': {'ld_capacity': 1, 'pool_num': 0,
+ 'ldname': 'LX:287RbQoP7VdwR1WsPC2fZT_back',
+ 'ldn': 4, 'RPL Attribute': 'RV', 'Purpose': '---'}}
+
+ def snapshot_create(self, bvname, svname, poolnumber):
+ pass
+
+ def snapshot_delete(self, bvname, svname):
+ pass
+
+ def query_BV_SV_status(self, bvname, svname):
+ return 'snap/active'
+
+ def set_io_limit(self, ldname, specs, force_delete=True):
+ pass
diff --git a/cinder/tests/unit/volume/drivers/nec/test_volume.py b/cinder/tests/unit/volume/drivers/nec/test_volume.py
new file mode 100644
index 00000000000..25be1360b12
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/nec/test_volume.py
@@ -0,0 +1,643 @@
+#
+# Copyright (c) 2016 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+import ddt
+import mock
+
+from cinder import exception
+from cinder.tests.unit.volume.drivers.nec import volume_common_test
+from cinder.volume.drivers.nec import volume_helper
+
+
+@ddt.ddt
+class VolumeIDConvertTest(volume_helper.MStorageDriver, unittest.TestCase):
+
+ def setUp(self):
+ self._common = volume_common_test.MStorageVolCommDummy(1, 2, 3)
+ self.do_setup(None)
+ self.vol = {}
+ self._properties = self._common.get_conf_properties()
+ self._cli = self._properties['cli']
+ self.xml = self._cli.view_all(self._properties['ismview_path'])
+ (self.pools,
+ self.lds,
+ self.ldsets,
+ self.used_ldns,
+ self.hostports,
+ self.max_ld_count) = self._common.configs(self.xml)
+
+ def tearDown(self):
+ pass
+
+ @ddt.data(("AAAAAAAA", "LX:37mA82"), ("BBBBBBBB", "LX:3R9ZwR"))
+ @ddt.unpack
+ def test_volumeid_should_change_62scale(self, volid, ldname):
+ self.vol['id'] = volid
+ actual = self._convert_id2name(self.vol)
+ self.assertEqual(ldname, actual,
+ "ID:%(volid)s should be change to %(ldname)s" %
+ {'volid': volid, 'ldname': ldname})
+
+ @ddt.data(("AAAAAAAA", "LX:37mA82_back"), ("BBBBBBBB", "LX:3R9ZwR_back"))
+ @ddt.unpack
+ def test_snap_volumeid_should_change_62scale_andpostfix(self,
+ volid,
+ ldname):
+ self.vol['id'] = volid
+ actual = self._convert_id2snapname(self.vol)
+ self.assertEqual(ldname, actual,
+ "ID:%(volid)s should be change to %(ldname)s" %
+ {'volid': volid, 'ldname': ldname})
+
+ @ddt.data(("AAAAAAAA", "LX:37mA82_m"), ("BBBBBBBB", "LX:3R9ZwR_m"))
+ @ddt.unpack
+ def test_ddrsnap_volumeid_should_change_62scale_and_m(self,
+ volid,
+ ldname):
+ self.vol['id'] = volid
+ actual = self._convert_id2migratename(self.vol)
+ self.assertEqual(ldname, actual,
+ "ID:%(volid)s should be change to %(ldname)s" %
+ {'volid': volid, 'ldname': ldname})
+
+ @ddt.data(("AAAAAAAA", "LX:3R9ZwR", "target:BBBBBBBB"))
+ @ddt.unpack
+ def test_migrate_volumeid_should_change_62scale_andpostfix(self,
+ volid,
+ ldname,
+ status):
+ self.vol['id'] = volid
+ self.vol['migration_status'] = status
+ actual = self._convert_id2name_in_migrate(self.vol)
+ self.assertEqual(ldname, actual,
+ "ID:%(volid)s/%(status)s should be "
+ "change to %(ldname)s" %
+ {'volid': volid,
+ 'status': status,
+ 'ldname': ldname})
+
+ @ddt.data(("AAAAAAAA", "LX:37mA82", "deleting:BBBBBBBB"),
+ ("AAAAAAAA", "LX:37mA82", ""),
+ ("AAAAAAAA", "LX:37mA82", "success"))
+ @ddt.unpack
+ def test_NOTmigrate_volumeid_should_change_62scale(self,
+ volid,
+ ldname,
+ status):
+ self.vol['id'] = volid
+ self.vol['migration_status'] = status
+ actual = self._convert_id2name_in_migrate(self.vol)
+ self.assertEqual(ldname, actual,
+ "ID:%(volid)s/%(status)s should be "
+ "change to %(ldname)s" %
+ {'volid': volid,
+ 'status': status,
+ 'ldname': ldname})
+
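+# A minimal sketch (not part of the driver) of the "62scale" conversion the
+# tests above exercise: the hex volume ID is re-encoded with the base-62
+# digits 0-9A-Za-z, so "AAAAAAAA" becomes "37mA82". The real implementation
+# lives in cinder.volume.drivers.nec.volume_common.
+#
+#     def _base62_sketch(volid):
+#         digits = ('0123456789'
+#                   'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+#                   'abcdefghijklmnopqrstuvwxyz')
+#         num = int(volid.replace('-', ''), 16)
+#         name = ''
+#         while num > 0:
+#             num, rem = divmod(num, 62)
+#             name = digits[rem] + name
+#         return 'LX:' + name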
+
+class NominatePoolLDTest(volume_helper.MStorageDriver, unittest.TestCase):
+
+ def setUp(self):
+ self._common = volume_common_test.MStorageVolCommDummy(1, 2, 3)
+ self.do_setup(None)
+ self.vol = {}
+ self._properties = self._common.get_conf_properties()
+ self._cli = self._properties['cli']
+ self.xml = self._cli.view_all(self._properties['ismview_path'])
+ (self.pools,
+ self.lds,
+ self.ldsets,
+ self.used_ldns,
+ self.hostports,
+ self.max_ld_count) = self._common.configs(self.xml)
+ self._numofld_per_pool = 1024
+
+ def tearDown(self):
+ pass
+
+ def test_getxml(self):
+ self.assertIsNotNone(self.xml, "iSMview xml should not be None")
+
+ def test_selectldn_for_normalvolume(self):
+ ldn = self._select_ldnumber(self.used_ldns, self.max_ld_count)
+ self.assertEqual(2, ldn, "selected ldn should be 2")
+
+ def test_selectpool_for_normalvolume(self):
+ self.vol['size'] = 10
+ pool = self._select_leastused_poolnumber(self.vol,
+ self.pools,
+ self.xml)
+ self.assertEqual(1, pool, "selected pool should be 1")
+ # config:pool_pools=[1]
+ self.vol['size'] = 999999999999
+ with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+ 'No available pools found.'):
+ pool = self._select_leastused_poolnumber(self.vol,
+ self.pools,
+ self.xml)
+
+ def test_selectpool_for_migratevolume(self):
+ self.vol['id'] = "46045673-41e7-44a7-9333-02f07feab04b"
+ self.vol['size'] = 10
+ self.vol['pool_num'] = 0
+ pool = self._select_migrate_poolnumber(self.vol,
+ self.pools,
+ self.xml,
+ [1])
+ self.assertEqual(1, pool, "selected pool should be 1")
+ self.vol['id'] = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
+ self.vol['size'] = 10
+ self.vol['pool_num'] = 1
+ pool = self._select_migrate_poolnumber(self.vol,
+ self.pools,
+ self.xml,
+ [1])
+ self.assertEqual(-1, pool, "selected pool is the same pool(return -1)")
+ self.vol['size'] = 999999999999
+ with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+ 'No available pools found.'):
+ pool = self._select_migrate_poolnumber(self.vol,
+ self.pools,
+ self.xml,
+ [1])
+
+ def test_selectpool_for_snapvolume(self):
+ self.vol['size'] = 10
+ savePool1 = self.pools[1]['free']
+ self.pools[1]['free'] = 0
+ pool = self._select_dsv_poolnumber(self.vol, self.pools)
+ self.assertEqual(2, pool, "selected pool should be 2")
+ # config:pool_backup_pools=[2]
+ self.pools[1]['free'] = savePool1
+
+ if len(self.pools[0]['ld_list']) == 1024:
+ savePool2 = self.pools[2]['free']
+ savePool3 = self.pools[3]['free']
+ self.pools[2]['free'] = 0
+ self.pools[3]['free'] = 0
+ with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+ 'No available pools found.'):
+ pool = self._select_dsv_poolnumber(self.vol, self.pools)
+ self.pools[2]['free'] = savePool2
+ self.pools[3]['free'] = savePool3
+
+ self.vol['size'] = 999999999999
+ pool = self._select_dsv_poolnumber(self.vol, self.pools)
+ self.assertEqual(2, pool, "selected pool should be 2")
+ # config:pool_backup_pools=[2]
+
+ def test_selectpool_for_ddrvolume(self):
+ self.vol['size'] = 10
+ pool = self._select_ddr_poolnumber(self.vol,
+ self.pools,
+ self.xml,
+ 10)
+ self.assertEqual(2, pool, "selected pool should be 2")
+ # config:pool_backup_pools=[2]
+
+ savePool2 = self.pools[2]['free']
+ savePool3 = self.pools[3]['free']
+ self.pools[2]['free'] = 0
+ self.pools[3]['free'] = 0
+ with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+ 'No available pools found.'):
+ pool = self._select_ddr_poolnumber(self.vol,
+ self.pools,
+ self.xml,
+ 10)
+ self.pools[2]['free'] = savePool2
+ self.pools[3]['free'] = savePool3
+
+ self.vol['size'] = 999999999999
+ with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+ 'No available pools found.'):
+ pool = self._select_ddr_poolnumber(self.vol,
+ self.pools,
+ self.xml,
+ 999999999999)
+
+ def test_selectpool_for_volddrvolume(self):
+ self.vol['size'] = 10
+ pool = self._select_volddr_poolnumber(self.vol,
+ self.pools,
+ self.xml,
+ 10)
+ self.assertEqual(1, pool, "selected pool should be 1")
+ # config:pool_pools=[0, 1]
+
+ savePool0 = self.pools[0]['free']
+ savePool1 = self.pools[1]['free']
+ self.pools[0]['free'] = 0
+ self.pools[1]['free'] = 0
+ with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+ 'No available pools found.'):
+ pool = self._select_volddr_poolnumber(self.vol,
+ self.pools,
+ self.xml,
+ 10)
+ self.pools[0]['free'] = savePool0
+ self.pools[1]['free'] = savePool1
+
+ self.vol['size'] = 999999999999
+ with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+ 'No available pools found.'):
+ pool = self._select_volddr_poolnumber(self.vol,
+ self.pools,
+ self.xml,
+ 999999999999)
+
+
+class VolumeCreateTest(volume_helper.MStorageDriver, unittest.TestCase):
+
+ def setUp(self):
+ self._common = volume_common_test.MStorageVolCommDummy(1, 2, 3)
+ self.do_setup(None)
+ self.vol = {}
+ self._properties = self._common.get_conf_properties()
+ self._cli = self._properties['cli']
+ self.xml = self._cli.view_all(self._properties['ismview_path'])
+ (self.pools,
+ self.lds,
+ self.ldsets,
+ self.used_ldns,
+ self.hostports,
+ self.max_ld_count) = self._common.configs(self.xml)
+
+ def tearDown(self):
+ pass
+
+ def test_validate_migrate_volume(self):
+ self.vol['id'] = "46045673-41e7-44a7-9333-02f07feab04b"
+ self.vol['size'] = 10
+ self.vol['status'] = 'available'
+ self._validate_migrate_volume(self.vol, self.xml)
+
+ self.vol['id'] = "46045673-41e7-44a7-9333-02f07feab04b"
+ self.vol['size'] = 10
+ self.vol['status'] = 'creating'
+ with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+ 'Specified Logical Disk'
+ ' LX:287RbQoP7VdwR1WsPC2fZT'
+ ' is not available.'):
+ self._validate_migrate_volume(self.vol, self.xml)
+
+ self.vol['id'] = "AAAAAAAA"
+ self.vol['size'] = 10
+ self.vol['status'] = 'available'
+ with self.assertRaisesRegexp(exception.NotFound,
+ 'Logical Disk `LX:37mA82`'
+ ' does not exist.'):
+ self._validate_migrate_volume(self.vol, self.xml)
+
+ def test_extend_volume(self):
+ mv = self.lds["LX:287RbQoP7VdwR1WsPC2fZT"] # MV-LDN:0 RV-LDN:4
+ rvs = self._can_extend_capacity(10, self.pools, self.lds, mv)
+ self.assertEqual("LX:287RbQoP7VdwR1WsPC2fZT_back", rvs[4]['ldname'])
+ with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+ 'Not enough pool capacity.'
+ ' pool_number=0,'
+ ' size_increase=1073741822926258176'):
+ self._can_extend_capacity(1000000000,
+ self.pools,
+ self.lds, mv)
+
+ self.vol['id'] = "46045673-41e7-44a7-9333-02f07feab04b" # MV
+ self.vol['size'] = 1
+ self.vol['status'] = 'available'
+ self.extend_volume(self.vol, 10)
+
+ self.vol['id'] = "00046058-d38e-7f60-67b7-59ed65e54225" # RV
+ self.vol['size'] = 1
+ self.vol['status'] = 'available'
+ with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+ 'RPL Attribute Error.'
+ ' RPL Attribute = RV'):
+ self.extend_volume(self.vol, 10)
+
+
+class BindLDTest(volume_helper.MStorageDriver, unittest.TestCase):
+
+ def setUp(self):
+ self._common = volume_common_test.MStorageVolCommDummy(1, 2, 3)
+ self.do_setup(None)
+ self.vol = {}
+ self._properties = self._common.get_conf_properties()
+ self._cli = self._properties['cli']
+ self.xml = self._cli.view_all(self._properties['ismview_path'])
+ (self.pools,
+ self.lds,
+ self.ldsets,
+ self.used_ldns,
+ self.hostports,
+ self.max_ld_count) = self._common.configs(self.xml)
+ self.src = {}
+ mock_bindld = mock.Mock()
+ self._bind_ld = mock_bindld
+ self._bind_ld.return_value = (0, 0, 0)
+
+ def test_bindld_CreateVolume(self):
+ self.vol['id'] = "AAAAAAAA"
+ self.vol['size'] = 1
+ self.vol['migration_status'] = "success"
+ self.create_volume(self.vol)
+ self._bind_ld.assert_called_once_with(
+ self.vol, self.vol['size'], None,
+ self._convert_id2name_in_migrate,
+ self._select_leastused_poolnumber)
+
+ def test_bindld_CreateCloneVolume(self):
+ self.vol['id'] = "AAAAAAAA"
+ self.vol['size'] = 1
+ self.vol['migration_status'] = "success"
+ self.src['id'] = "46045673-41e7-44a7-9333-02f07feab04b"
+ self.src['size'] = 1
+ self.create_cloned_volume(self.vol, self.src)
+ self._bind_ld.assert_called_once_with(
+ self.vol, self.vol['size'], None,
+ self._convert_id2name,
+ self._select_leastused_poolnumber)
+
+
+class BindLDTest_iSCSISnap(volume_helper.MStorageDriver,
+ unittest.TestCase):
+
+ def setUp(self):
+ self._common = volume_common_test.MStorageVolCommDummy(1, 2, 3)
+ self.do_setup(None)
+ self.vol = {}
+ self._properties = self._common.get_conf_properties()
+ self._cli = self._properties['cli']
+ self.xml = self._cli.view_all(self._properties['ismview_path'])
+ (self.pools,
+ self.lds,
+ self.ldsets,
+ self.used_ldns,
+ self.hostports,
+ self.max_ld_count) = self._common.configs(self.xml)
+ self.snap = {}
+ mock_bindld = mock.Mock()
+ self._bind_ld = mock_bindld
+ self._bind_ld.return_value = (0, 0, 0)
+
+ def test_bindld_CreateSnapshot(self):
+ self.snap['id'] = "AAAAAAAA"
+ self.snap['volume_id'] = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
+ self.snap['size'] = 10
+ self.create_snapshot(self.snap)
+ self._bind_ld.assert_called_once_with(
+ self.snap, 10, None,
+ self._convert_id2snapname,
+ self._select_ddr_poolnumber, 10)
+
+ def test_bindld_CreateFromSnapshot(self):
+ self.vol['id'] = "AAAAAAAA"
+ self.vol['size'] = 1
+ self.vol['migration_status'] = "success"
+ self.snap['id'] = "63410c76-2f12-4473-873d-74a63dfcd3e2"
+ self.snap['volume_id'] = "92dbc7f4-dbc3-4a87-aef4-d5a2ada3a9af"
+ self.create_volume_from_snapshot(self.vol, self.snap)
+ self._bind_ld.assert_called_once_with(
+ self.vol, 10, None,
+ self._convert_id2name,
+ self._select_volddr_poolnumber, 10)
+
+
+class BindLDTest_Snap(volume_helper.MStorageDSVDriver, unittest.TestCase):
+
+ def setUp(self):
+ self._common = volume_common_test.MStorageVolCommDummy(1, 2, 3)
+ self.do_setup(None)
+ self.vol = {}
+ self._properties = self._common.get_conf_properties()
+ self._cli = self._properties['cli']
+ self.xml = self._cli.view_all(self._properties['ismview_path'])
+ (self.pools,
+ self.lds,
+ self.ldsets,
+ self.used_ldns,
+ self.hostports,
+ self.max_ld_count) = self._common.configs(self.xml)
+ self.snap = {}
+ mock_bindld = mock.Mock()
+ self._bind_ld = mock_bindld
+ self._bind_ld.return_value = (0, 0, 0)
+
+ def test_bindld_CreateFromSnapshot(self):
+ self.vol['id'] = "AAAAAAAA"
+ self.vol['size'] = 1
+ self.vol['migration_status'] = "success"
+ self.snap['id'] = "63410c76-2f12-4473-873d-74a63dfcd3e2"
+ self.snap['volume_id'] = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
+ self.create_volume_from_snapshot(self.vol, self.snap)
+ self._bind_ld.assert_called_once_with(
+ self.vol, 1, None,
+ self._convert_id2name,
+ self._select_volddr_poolnumber, 1)
+
+
+class ExportTest(volume_helper.MStorageDriver, unittest.TestCase):
+
+ def setUp(self):
+ self._common = volume_common_test.MStorageVolCommDummy(1, 2, 3)
+ self.do_setup(None)
+ self.vol = {}
+ self._properties = self._common.get_conf_properties()
+ self._cli = self._properties['cli']
+ self.xml = self._cli.view_all(self._properties['ismview_path'])
+ (self.pools,
+ self.lds,
+ self.ldsets,
+ self.used_ldns,
+ self.hostports,
+ self.max_ld_count) = self._common.configs(self.xml)
+ mock_getldset = mock.Mock()
+ self._common.get_ldset = mock_getldset
+ self._common.get_ldset.return_value = self.ldsets["LX:OpenStack0"]
+
+ def tearDown(self):
+ pass
+
+ def test_iscsi_portal(self):
+ self.vol['id'] = "46045673-41e7-44a7-9333-02f07feab04b"
+ self.vol['size'] = 10
+ self.vol['status'] = None
+ self.vol['migration_status'] = None
+ connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
+ self.iscsi_do_export(None, self.vol, connector)
+
+ def test_fc_do_export(self):
+ self.vol['id'] = "46045673-41e7-44a7-9333-02f07feab04b"
+ self.vol['size'] = 10
+ self.vol['status'] = None
+ self.vol['migration_status'] = None
+ connector = {'wwpns': ["1000-0090-FAA0-723A", "1000-0090-FAA0-723B"]}
+ self.fc_do_export(None, self.vol, connector)
+
+ def test_remove_export(self):
+ self.vol['id'] = "46045673-41e7-44a7-9333-02f07feab04b"
+ self.vol['size'] = 10
+ self.vol['status'] = 'uploading'
+ self.vol['attach_status'] = 'attached'
+ self.vol['migration_status'] = None
+ context = mock.Mock()
+ ret = self.remove_export(context, self.vol)
+ self.assertIsNone(ret)
+
+ self.vol['attach_status'] = None
+
+ self.vol['status'] = 'downloading'
+ with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+ 'Failed to unregister Logical Disk from'
+ r' Logical Disk Set \(iSM31064\)'):
+ self.remove_export(context, self.vol)
+
+ self.vol['status'] = None
+ migstat = 'target:1febb976-86d0-42ed-9bc0-4aa3e158f27d'
+ self.vol['migration_status'] = migstat
+ ret = self.remove_export(context, self.vol)
+ self.assertIsNone(ret)
+
+ def test_iscsi_initialize_connection(self):
+ self.vol['id'] = "46045673-41e7-44a7-9333-02f07feab04b"
+ loc = "127.0.0.1:3260:1 iqn.2010-10.org.openstack:volume-00000001 88"
+ self.vol['provider_location'] = loc
+ connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255",
+ 'multipath': True}
+ info = self._iscsi_initialize_connection(self.vol, connector)
+ self.assertEqual('iscsi', info['driver_volume_type'])
+ self.assertEqual('iqn.2010-10.org.openstack:volume-00000001',
+ info['data']['target_iqn'])
+ self.assertEqual('127.0.0.1:3260', info['data']['target_portal'])
+ self.assertEqual(88, info['data']['target_lun'])
+ self.assertEqual('iqn.2010-10.org.openstack:volume-00000001',
+ info['data']['target_iqns'][0])
+ self.assertEqual('127.0.0.1:3260', info['data']['target_portals'][0])
+ self.assertEqual(88, info['data']['target_luns'][0])
+
+ def test_fc_initialize_connection(self):
+ self.vol['id'] = "46045673-41e7-44a7-9333-02f07feab04b"
+ self.vol['migration_status'] = None
+ connector = {'wwpns': ["1000-0090-FAA0-723A", "1000-0090-FAA0-723B"]}
+ info = self._fc_initialize_connection(self.vol, connector)
+ self.assertEqual('fibre_channel', info['driver_volume_type'])
+ self.assertEqual('2100000991020012', info['data']['target_wwn'][0])
+ self.assertEqual('2200000991020012', info['data']['target_wwn'][1])
+ self.assertEqual('2900000991020012', info['data']['target_wwn'][2])
+ self.assertEqual('2A00000991020012', info['data']['target_wwn'][3])
+ self.assertEqual(
+ '2100000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723A'][0])
+ self.assertEqual(
+ '2100000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723B'][0])
+ self.assertEqual(
+ '2200000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723A'][1])
+ self.assertEqual(
+ '2200000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723B'][1])
+ self.assertEqual(
+ '2900000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723A'][2])
+ self.assertEqual(
+ '2900000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723B'][2])
+ self.assertEqual(
+ '2A00000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723A'][3])
+ self.assertEqual(
+ '2A00000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723B'][3])
+
+ def test_fc_terminate_connection(self):
+ self.vol['id'] = "46045673-41e7-44a7-9333-02f07feab04b"
+ connector = {'wwpns': ["1000-0090-FAA0-723A", "1000-0090-FAA0-723B"]}
+ info = self._fc_terminate_connection(self.vol, connector)
+ self.assertEqual('fibre_channel', info['driver_volume_type'])
+ self.assertEqual('2100000991020012', info['data']['target_wwn'][0])
+ self.assertEqual('2200000991020012', info['data']['target_wwn'][1])
+ self.assertEqual('2900000991020012', info['data']['target_wwn'][2])
+ self.assertEqual('2A00000991020012', info['data']['target_wwn'][3])
+ self.assertEqual(
+ '2100000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723A'][0])
+ self.assertEqual(
+ '2100000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723B'][0])
+ self.assertEqual(
+ '2200000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723A'][1])
+ self.assertEqual(
+ '2200000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723B'][1])
+ self.assertEqual(
+ '2900000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723A'][2])
+ self.assertEqual(
+ '2900000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723B'][2])
+ self.assertEqual(
+ '2A00000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723A'][3])
+ self.assertEqual(
+ '2A00000991020012',
+ info['data']['initiator_target_map']['1000-0090-FAA0-723B'][3])
+
+
+class DeleteDSVVolume_test(volume_helper.MStorageDSVDriver,
+ unittest.TestCase):
+
+ def setUp(self):
+ self._common = volume_common_test.MStorageVolCommDummy(1, 2, 3)
+ self.do_setup(None)
+ self.vol = {}
+ self._properties = self._common.get_conf_properties()
+ self._cli = self._properties['cli']
+ self.xml = self._cli.view_all(self._properties['ismview_path'])
+ (self.pools,
+ self.lds,
+ self.ldsets,
+ self.used_ldns,
+ self.hostports,
+ self.max_ld_count) = self._common.configs(self.xml)
+
+ def patch_query_MV_RV_status(self, ldname, rpltype):
+ return 'replicated'
+
+ @mock.patch('cinder.tests.unit.volume.drivers.nec.cli_test.'
+ 'MStorageISMCLI.query_MV_RV_status', patch_query_MV_RV_status)
+ def test_delete_volume(self):
+ # MV not separated
+ self.vol['id'] = "46045673-41e7-44a7-9333-02f07feab04b"
+ with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+ 'Specified Logical Disk'
+ ' LX:287RbQoP7VdwR1WsPC2fZT'
+ ' has been copied.'):
+ self.delete_volume(self.vol)
+ # RV not separated
+ self.vol['id'] = "00046058-d38e-7f60-67b7-59ed65e54225"
+ with self.assertRaisesRegexp(exception.VolumeBackendAPIException,
+ 'Specified Logical Disk'
+ ' LX:20000009910200140005'
+ ' has been copied.'):
+ self.delete_volume(self.vol)
+
+ def test_delete_snapshot(self):
+ self.vol['id'] = "63410c76-2f12-4473-873d-74a63dfcd3e2"
+ self.vol['volume_id'] = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
+ ret = self.delete_snapshot(self.vol)
+ self.assertIsNone(ret)
diff --git a/cinder/tests/unit/volume/drivers/nec/volume_common_test.py b/cinder/tests/unit/volume/drivers/nec/volume_common_test.py
new file mode 100644
index 00000000000..81b2afe7a6e
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/nec/volume_common_test.py
@@ -0,0 +1,299 @@
+#
+# Copyright (c) 2016 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+
+from oslo_utils import units
+
+from cinder.tests.unit.volume.drivers.nec import cli_test
+from cinder.volume.drivers.nec import volume_common
+
+
+class MStorageVolCommDummy(object):
+ def __init__(self, configuration, host, driver_name):
+ super(MStorageVolCommDummy, self).__init__()
+ self._properties = self.get_conf_properties()
+ self._context = None
+
+ def set_context(self, context):
+ self._context = context
+
+ def get_conf(self, host):
+ return self.get_conf_properties()
+
+ def get_conf_properties(self, conf=None):
+ conf = {
+ 'cli': None,
+ 'cli_fip': '10.64.169.250',
+ 'cli_user': 'sysadmin',
+ 'cli_password': 'sys123',
+ 'cli_privkey': 'sys123',
+ 'pool_pools': [0, 1],
+ 'pool_backup_pools': [2, 3],
+ 'pool_actual_free_capacity': 50000000000,
+ 'ldset_name': 'LX:OpenStack0',
+ 'ldset_controller_node_name': 'LX:node0',
+ 'ld_name_format': 'LX:%s',
+ 'ld_backupname_format': 'LX:%s_back',
+ 'ld_backend_max_count': 1024,
+ 'thread_timeout': 5,
+ 'ismview_dir': 'view',
+ 'ismview_alloptimize': '',
+ 'ssh_pool_port_number': 22,
+ 'diskarray_name': 'node0',
+ 'queryconfig_view': '',
+ 'ismview_path': None,
+ 'driver_name': 'MStorageISCSIDriver',
+ 'config_group': '',
+ 'configuration': '',
+ 'vendor_name': 'nec',
+ 'products': '',
+ 'backend_name': '',
+ 'portal_number': 2
+ }
+ conf['cli'] = cli_test.MStorageISMCLI(conf)
+ return conf
+
+ @staticmethod
+ def get_ldname(volid, volformat):
+ return volume_common.MStorageVolumeCommon.get_ldname(volid, volformat)
+
+ def get_diskarray_max_ld_count(self):
+ return 8192
+
+ def get_pool_config(self, xml, root):
+ pools = {}
+ for xmlobj in root.xpath('./'
+ 'CMD_REQUEST/'
+ 'CHAPTER[@name="Pool"]/'
+ 'OBJECT[@name="Pool"]'):
+ section = xmlobj.find('./SECTION[@name="Pool Detail Information"]')
+ unit = section.find('./UNIT[@name="Pool No.(h)"]')
+ pool_num = int(unit.text, 16)
+ unit = section.find('UNIT[@name="Pool Capacity"]')
+ total = int(unit.text, 10)
+ unit = section.find('UNIT[@name="Free Pool Capacity"]')
+ free = int(unit.text, 10)
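+ # For virtual-capacity (thin) pools, replace the figures with the
+ # actual physical capacity when pool_actual_free_capacity is set.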
+ if self._properties['pool_actual_free_capacity']:
+ unit = section.find('UNIT[@name="Used Pool Capacity"]')
+ used = int(unit.text, 10)
+ for section in xmlobj.xpath('./SECTION[@name='
+ '"Virtual Capacity Pool '
+ 'Information"]'):
+ unit = section.find('UNIT[@name="Actual Capacity"]')
+ total = int(unit.text, 10)
+ free = total - used
+ pool = {'pool_num': pool_num,
+ 'total': total,
+ 'free': free,
+ 'ld_list': []}
+ pools[pool_num] = pool
+ return pools
+
+ def get_ld_config(self, xml, root, pools):
+ lds = {}
+ used_ldns = []
+ for section in root.xpath('./'
+ 'CMD_REQUEST/'
+ 'CHAPTER[@name="Logical Disk"]/'
+ 'OBJECT[@name="Logical Disk"]/'
+ 'SECTION[@name="LD Detail Information"]'):
+ unit = section.find('./UNIT[@name="LDN(h)"]')
+ ldn = int(unit.text, 16)
+ unit = section.find('./UNIT[@name="OS Type"]')
+ ostype = unit.text if unit.text is not None else ''
+ unit = section.find('./UNIT[@name="LD Name"]')
+ ldname = ostype + ':' + unit.text
+ unit = section.find('./UNIT[@name="Pool No.(h)"]')
+ pool_num = int(unit.text, 16)
+
+ unit = section.find('./UNIT[@name="LD Capacity"]')
+
+ # byte capacity transform GB capacity.
+ ld_capacity = int(unit.text, 10) // units.Gi
+
+ unit = section.find('./UNIT[@name="RPL Attribute"]')
+ rplatr = unit.text
+
+ unit = section.find('./UNIT[@name="Purpose"]')
+ purpose = unit.text
+
+ ld = {'ldname': ldname,
+ 'ldn': ldn,
+ 'pool_num': pool_num,
+ 'ld_capacity': ld_capacity,
+ 'RPL Attribute': rplatr,
+ 'Purpose': purpose}
+ pools[pool_num]['ld_list'].append(ld)
+ lds[ldname] = ld
+ used_ldns.append(ldn)
+ return lds, used_ldns
+
+ def get_iscsi_ldset_config(self, xml, root):
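+ # A Normal-mode LD set exposes a single target IQN for every LUN;
+ # Multi-Target mode carries one IQN per LD.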
+ ldsets = {}
+ for xmlobj in root.xpath('./'
+ 'CMD_REQUEST/'
+ 'CHAPTER[@name="Access Control"]/'
+ 'OBJECT[@name="LD Set(iSCSI)"]'):
+ ldsetlds = {}
+ portals = []
+ for unit in xmlobj.xpath('./SECTION[@name="Portal"]/'
+ 'UNIT[@name="Portal"]'):
+ if not unit.text.startswith('0.0.0.0:'):
+ portals.append(unit.text)
+ section = xmlobj.find('./SECTION[@name="LD Set(iSCSI)'
+ ' Information"]')
+ if section is None:
+ return ldsets
+ unit = section.find('./UNIT[@name="Platform"]')
+ platform = unit.text
+ unit = section.find('./UNIT[@name="LD Set Name"]')
+ ldsetname = platform + ':' + unit.text
+ unit = section.find('./UNIT[@name="Target Mode"]')
+ tmode = unit.text
+ if tmode == 'Normal':
+ unit = section.find('./UNIT[@name="Target Name"]')
+ iqn = unit.text
+ for section in xmlobj.xpath('./SECTION[@name="LUN/LD List"]'):
+ unit = section.find('./UNIT[@name="LDN(h)"]')
+ ldn = int(unit.text, 16)
+ unit = section.find('./UNIT[@name="LUN(h)"]')
+ lun = int(unit.text, 16)
+ ld = {'ldn': ldn,
+ 'lun': lun,
+ 'iqn': iqn}
+ ldsetlds[ldn] = ld
+ elif tmode == 'Multi-Target':
+ for section in xmlobj.xpath('./SECTION[@name='
+ '"Target Information For '
+ 'Multi-Target Mode"]'):
+ unit = section.find('./UNIT[@name="Target Name"]')
+ iqn = unit.text
+ unit = section.find('./UNIT[@name="LDN(h)"]')
+ if unit.text.startswith('-'):
+ continue
+ ldn = int(unit.text, 16)
+ unit = section.find('./UNIT[@name="LUN(h)"]')
+ if unit.text.startswith('-'):
+ continue
+ lun = int(unit.text, 16)
+ ld = {'ldn': ldn,
+ 'lun': lun,
+ 'iqn': iqn}
+ ldsetlds[ldn] = ld
+ ldset = {'ldsetname': ldsetname,
+ 'protocol': 'iSCSI',
+ 'portal_list': portals,
+ 'lds': ldsetlds}
+ ldsets[ldsetname] = ldset
+ return ldsets
+
+ def get_fc_ldset_config(self, xml, root):
+ ldsets = {}
+ for xmlobj in root.xpath('./'
+ 'CMD_REQUEST/'
+ 'CHAPTER[@name="Access Control"]/'
+ 'OBJECT[@name="LD Set(FC)"]'):
+ ldsetlds = {}
+ section = xmlobj.find('./SECTION[@name="LD Set(FC)'
+ ' Information"]')
+ if section is None:
+ return ldsets
+ unit = section.find('./UNIT[@name="Platform"]')
+ platform = unit.text
+ unit = section.find('./UNIT[@name="LD Set Name"]')
+ ldsetname = platform + ':' + unit.text
+ wwpns = []
+ ports = []
+ for section in xmlobj.xpath('./SECTION[@name="Path List"]'):
+ unit = section.find('./UNIT[@name="Path"]')
+ if unit.text.find('(') != -1:
+ ports.append(unit.text)
+ else:
+ wwpns.append(unit.text)
+ for section in xmlobj.xpath('./SECTION[@name="LUN/LD List"]'):
+ unit = section.find('./UNIT[@name="LDN(h)"]')
+ ldn = int(unit.text, 16)
+ unit = section.find('./UNIT[@name="LUN(h)"]')
+ lun = int(unit.text, 16)
+ ld = {'ldn': ldn,
+ 'lun': lun}
+ ldsetlds[ldn] = ld
+ ldset = {'ldsetname': ldsetname,
+ 'lds': ldsetlds,
+ 'protocol': 'FC',
+ 'wwpn': wwpns,
+ 'port': ports}
+ ldsets[ldsetname] = ldset
+ return ldsets
+
+ def get_hostport_config(self, xml, root):
+ hostports = {}
+ for section in root.xpath('./'
+ 'CMD_REQUEST/'
+ 'CHAPTER[@name="Controller"]/'
+ 'OBJECT[@name="Host Port"]/'
+ 'SECTION[@name="Host Director'
+ '/Host Port Information"]'):
+ unit = section.find('./UNIT[@name="Port No.(h)"]')
+ units = unit.text.split('-')
+ director = int(units[0], 16)
+ port = int(units[1], 16)
+ unit = section.find('./UNIT[@name="IP Address"]')
+ if unit is not None:
+ ip = unit.text
+ protocol = 'iSCSI'
+ wwpn = None
+ else:
+ ip = '0.0.0.0'
+ protocol = 'FC'
+ unit = section.find('./UNIT[@name="WWPN"]')
+ wwpn = unit.text
+
+ # Port link status lookup (this dummy does not filter on it).
+ unit = section.find('./UNIT[@name="Link Status"]')
+ hostport = {
+ 'director': director,
+ 'port': port,
+ 'ip': ip,
+ 'protocol': protocol,
+ 'wwpn': wwpn
+ }
+ if director not in hostports:
+ hostports[director] = []
+ hostports[director].append(hostport)
+ return hostports
+
+ def configs(self, xml):
+ root = etree.fromstring(xml)
+ pools = self.get_pool_config(xml, root)
+ lds, used_ldns = self.get_ld_config(xml, root, pools)
+ iscsi_ldsets = self.get_iscsi_ldset_config(xml, root)
+ fc_ldsets = self.get_fc_ldset_config(xml, root)
+ hostports = self.get_hostport_config(xml, root)
+ diskarray_max_ld_count = self.get_diskarray_max_ld_count()
+
+ ldsets = {}
+ ldsets.update(iscsi_ldsets)
+ ldsets.update(fc_ldsets)
+
+ return pools, lds, ldsets, used_ldns, hostports, diskarray_max_ld_count
+
+ def get_volume_type_qos_specs(self, volume):
+ return {}
+
+ def check_io_parameter(self, specs):
+ pass
diff --git a/cinder/volume/drivers/nec/__init__.py b/cinder/volume/drivers/nec/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cinder/volume/drivers/nec/cli.py b/cinder/volume/drivers/nec/cli.py
new file mode 100644
index 00000000000..178ef9d659e
--- /dev/null
+++ b/cinder/volume/drivers/nec/cli.py
@@ -0,0 +1,781 @@
+#
+# Copyright (c) 2016 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import re
+import select
+import time
+import traceback
+
+from oslo_log import log as logging
+from oslo_utils import excutils
+from oslo_utils import units
+
+from cinder import coordination
+from cinder import exception
+from cinder.i18n import _
+from cinder import ssh_utils
+
+LOG = logging.getLogger(__name__)
+
+retry_msgids = ['iSM31005', 'iSM31015', 'iSM42408', 'iSM42412']
+
+
+class MStorageISMCLI(object):
+ """SSH client."""
+
+ def __init__(self, properties):
+ super(MStorageISMCLI, self).__init__()
+
+ self._sshpool = None
+ self._properties = properties
+
+ def _execute(self, command, expected_status=[0], raise_exec=True):
+ return self._sync_execute(command, self._properties['diskarray_name'],
+ expected_status, raise_exec)
+
+ @coordination.synchronized('mstorage_ismcli_execute_{diskarray_name}')
+ def _sync_execute(self, command, diskarray_name,
+ expected_status=[0], raise_exec=True):
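+ # Retry transient ISM CLI errors (retry_msgids) and dropped SSH
+ # connections up to 60 times, sleeping 5 seconds between attempts.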
+ retry_flag = True
+ retry_count = 0
+ while retry_flag is True:
+ try:
+ out, err, status = self._cli_execute(command, expected_status,
+ False)
+ if status != 0:
+ errflg = 0
+ errnum = out + err
+ LOG.debug('ismcli failed (errnum=%s).', errnum)
+ for retry_msgid in retry_msgids:
+ if errnum.find(retry_msgid) >= 0:
+ LOG.debug('`%(command)s` failed. '
+ '%(name)s %(errnum)s '
+ 'retry_count=%(retry_count)d',
+ {'command': command,
+ 'name': __name__,
+ 'errnum': errnum,
+ 'retry_count': retry_count})
+ errflg = 1
+ break
+ if errflg == 1:
+ retry_count += 1
+ if retry_count >= 60:
+ msg = (_('Timeout `%(command)s`.'
+ ' status=%(status)d, '
+ 'out="%(out)s", '
+ 'err="%(err)s".') %
+ {'command': command,
+ 'status': status,
+ 'out': out,
+ 'err': err})
+ raise exception.APITimeout(msg)
+ time.sleep(5)
+ continue
+ else:
+ if raise_exec is True:
+ msg = _('Command `%s` failed.') % command
+ raise exception.VolumeBackendAPIException(data=msg)
+ except EOFError:
+ with excutils.save_and_reraise_exception() as ctxt:
+ LOG.debug('EOFError has occurred. '
+ '%(name)s retry_count=%(retry_count)d',
+ {'name': __name__,
+ 'retry_count': retry_count})
+ retry_count += 1
+ if retry_count < 60:
+ ctxt.reraise = False
+ time.sleep(5)
+ continue
+ retry_flag = False
+
+ return out, err, status
+
+ def _execute_nolock(self, command, expected_status=[0], raise_exec=True):
+ retry_flag = True
+ retry_count = 0
+ while retry_flag is True:
+ try:
+ out, err, status = self._cli_execute(command, expected_status,
+ raise_exec)
+ except EOFError:
+ with excutils.save_and_reraise_exception() as ctxt:
+ LOG.debug('EOFError has occurred. '
+ '%(name)s retry_count=%(retry_count)d',
+ {'name': __name__,
+ 'retry_count': retry_count})
+ retry_count += 1
+ if retry_count < 60:
+ ctxt.reraise = False
+ time.sleep(5)
+ continue
+ retry_flag = False
+ return out, err, status
+
+ def _cli_execute(self, command, expected_status=[0], raise_exec=True):
+ if not self._sshpool:
+ LOG.debug('ssh_utils.SSHPool execute.')
+ self._sshpool = ssh_utils.SSHPool(
+ self._properties['cli_fip'],
+ self._properties['ssh_pool_port_number'],
+ self._properties['ssh_conn_timeout'],
+ self._properties['cli_user'],
+ self._properties['cli_password'],
+ privatekey=self._properties['cli_privkey'])
+
+ with self._sshpool.item() as ssh:
+ LOG.debug('`%s` executing...', command)
+ stdin, stdout, stderr = ssh.exec_command(command)
+ stdin.close()
+ channel = stdout.channel
+ _out, _err = [], []
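+ # Drain stdout and stderr as data becomes ready; the exit status
+ # is only read once both streams are empty.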
+ while True:
+ select.select([channel], [], [])
+ if channel.recv_ready():
+ _out.append(channel.recv(4096))
+ continue
+ if channel.recv_stderr_ready():
+ _err.append(channel.recv_stderr(4096))
+ continue
+ if channel.exit_status_ready():
+ status = channel.recv_exit_status()
+ break
+ LOG.debug('`%(command)s` done. status=%(status)d.',
+ {'command': command, 'status': status})
+ out, err = ''.join(_out), ''.join(_err)
+ if expected_status is not None and status not in expected_status:
+ LOG.debug('`%(command)s` failed. status=%(status)d, '
+ 'out="%(out)s", err="%(err)s".',
+ {'command': command, 'status': status,
+ 'out': out, 'err': err})
+ if raise_exec is True:
+ msg = _('Command `%s` failed.') % command
+ raise exception.VolumeBackendAPIException(data=msg)
+ return out, err, status
+
+ def view_all(self, conf_ismview_path=None, delete_ismview=True,
+ cmd_lock=True):
+ if self._properties['queryconfig_view'] is True:
+ command = 'clioutmsg xml; iSMview'
+ if self._properties['ismview_alloptimize'] is True:
+ command += ' --alloptimize'
+ else:
+ command += ' -all'
+ else:
+ command = 'iSMquery -cinder -xml -all'
+ if cmd_lock is True:
+ out, err, status = self._execute(command)
+ else:
+ out, err, status = self._execute_nolock(command)
+
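+ # Strip the trailing ExitStatus line so only the XML remains.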
+ exstats = re.compile("(.*)ExitStatus(.*)\n")
+ tmpout = exstats.sub('', out)
+ out = tmpout
+ if conf_ismview_path is not None:
+ if delete_ismview:
+ if os.path.exists(conf_ismview_path):
+ os.remove(conf_ismview_path)
+ LOG.debug('Removed clioutmsg xml at %s.',
+ conf_ismview_path)
+ else:
+ with open(conf_ismview_path, 'w+') as f:
+ f.write(out)
+ LOG.debug('Wrote clioutmsg xml to %s.',
+ conf_ismview_path)
+ return out
+
+ def get_poolnumber_and_ldnumber(self, pools, used_ldns, max_ld_count):
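+ # Pick the pool holding the fewest LDs, then the lowest unused LDN;
+ # valid LDNs are 0 .. max_ld_count - 1.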
+ selected_pool = -1
+ min_ldn = 0
+ for pool in pools:
+ nld = len(pool['ld_list'])
+ if selected_pool == -1 or min_ldn > nld:
+ selected_pool = pool['pool_num']
+ min_ldn = nld
+ for ldn in range(0, max_ld_count + 1):
+ if ldn not in used_ldns:
+ break
+ if ldn > max_ld_count - 1:
+ msg = _('All Logical Disk numbers are used.')
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ return selected_pool, ldn
+
+ def ldbind(self, name, pool, ldn, size):
+ """Bind an LD and attach a nickname to it."""
+ errnum = ""
+ cmd = ('iSMcfg ldbind -poolnumber %(poolnumber)d -ldn %(ldn)d '
+ '-capacity %(capacity)d -immediate'
+ % {'poolnumber': pool, 'ldn': ldn,
+ 'capacity': size})
+ out, err, status = self._execute(cmd, [0], False)
+ errnum = err
+ if status != 0:
+ return False, errnum
+
+ cmd = ('iSMcfg nickname -ldn %(ldn)d -newname %(newname)s '
+ '-immediate'
+ % {'ldn': ldn, 'newname': name})
+ self._execute(cmd)
+ return True, errnum
+
+ def unbind(self, name):
+ """Unbind an LD."""
+ cmd = 'iSMcfg ldunbind -ldname %s' % name
+ self._execute(cmd)
+
+ def expand(self, ldn, capacity):
+ """Expand a LD."""
+ cmd = ('iSMcfg ldexpand -ldn %(ldn)d -capacity %(capacity)d '
+ '-unit gb'
+ % {'ldn': ldn, 'capacity': capacity})
+ self._execute(cmd)
+
+ def addldsetld(self, ldset, ldname, lun=None):
+ """Add an LD to specified LD Set."""
+ if lun is None:
+ cmd = ('iSMcfg addldsetld -ldset %(ldset)s '
+ '-ldname %(ldname)s'
+ % {'ldset': ldset, 'ldname': ldname})
+ self._execute(cmd)
+ else:
+ cmd = ('iSMcfg addldsetld -ldset %(ldset)s -ldname %(ldname)s '
+ '-lun %(lun)d'
+ % {'ldset': ldset, 'ldname': ldname,
+ 'lun': lun})
+ self._execute(cmd)
+
+ def delldsetld(self, ldset, ldname):
+ """Delete an LD from specified LD Set."""
+ rtn = True
+ errnum = ""
+ cmd = ('iSMcfg delldsetld -ldset %(ldset)s '
+ '-ldname %(ldname)s'
+ % {'ldset': ldset,
+ 'ldname': ldname})
+ out, err, status = self._execute(cmd, [0], False)
+ errnum = err
+ if status != 0:
+ rtn = False
+ return rtn, errnum
+
+ def changeldname(self, ldn, new_name, old_name=None):
+ """Rename nickname of LD."""
+ if old_name is None:
+ cmd = ('iSMcfg nickname -ldn %(ldn)d -newname %(newname)s '
+ '-immediate'
+ % {'ldn': ldn, 'newname': new_name})
+ self._execute(cmd)
+ else:
+ cmd = ('iSMcfg nickname -ldname %(ldname)s '
+ '-newname %(newname)s'
+ % {'ldname': old_name,
+ 'newname': new_name})
+ self._execute(cmd)
+
+ def setpair(self, mvname, rvname):
+ """Set pair."""
+ cmd = ('iSMrc_pair -pair -mv %(mv)s -mvflg ld '
+ '-rv %(rv)s -rvflg ld'
+ % {'mv': mvname, 'rv': rvname})
+ self._execute(cmd)
+
+ LOG.debug('Pair command completed. MV = %(mv)s RV = %(rv)s.',
+ {'mv': mvname, 'rv': rvname})
+
+ def unpair(self, mvname, rvname, flag):
+ """Unset pair."""
+ if flag == 'normal':
+ cmd = ('iSMrc_pair -unpair -mv %(mv)s -mvflg ld '
+ '-rv %(rv)s -rvflg ld'
+ % {'mv': mvname, 'rv': rvname})
+ self._execute(cmd)
+ elif flag == 'force':
+ cmd = ('iSMrc_pair -unpair -mv %(mv)s -mvflg ld '
+ '-rv %(rv)s -rvflg ld -force all'
+ % {'mv': mvname, 'rv': rvname})
+ self._execute(cmd)
+ else:
+ LOG.debug('Invalid unpair flag specified. flag = %s', flag)
+
+ LOG.debug('Unpair command completed. MV = %(mv)s, RV = %(rv)s.',
+ {'mv': mvname, 'rv': rvname})
+
+ def replicate(self, mvname, rvname, flag):
+ if flag == 'full':
+ cmd = ('iSMrc_replicate -mv %(mv)s -mvflg ld '
+ '-rv %(rv)s -rvflg ld -nowait -cprange full '
+ '-cpmode bg'
+ % {'mv': mvname, 'rv': rvname})
+ self._execute(cmd)
+ else:
+ cmd = ('iSMrc_replicate -mv %(mv)s -mvflg ld '
+ '-rv %(rv)s -rvflg ld -nowait -cpmode bg'
+ % {'mv': mvname, 'rv': rvname})
+ self._execute(cmd)
+
+ LOG.debug('Replicate command completed. MV = %(mv)s RV = %(rv)s.',
+ {'mv': mvname, 'rv': rvname})
+
+ def separate(self, mvname, rvname, flag):
+ """Separate for backup."""
+ if flag == 'backup':
+ cmd = ('iSMrc_separate -mv %(mv)s -mvflg ld '
+ '-rv %(rv)s -rvflg ld '
+ '-rvacc ro -rvuse complete -nowait'
+ % {'mv': mvname, 'rv': rvname})
+ self._execute(cmd)
+ elif flag == 'restore' or flag == 'clone':
+ cmd = ('iSMrc_separate -mv %(mv)s -mvflg ld '
+ '-rv %(rv)s -rvflg ld '
+ '-rvacc rw -rvuse immediate -nowait'
+ % {'mv': mvname, 'rv': rvname})
+ self._execute(cmd)
+ elif flag == 'esv_restore' or flag == 'migrate':
+ cmd = ('iSMrc_separate -mv %(mv)s -mvflg ld '
+ '-rv %(rv)s -rvflg ld '
+ '-rvacc rw -rvuse complete -nowait'
+ % {'mv': mvname, 'rv': rvname})
+ self._execute(cmd)
+ else:
+ LOG.debug('Invalid separate flag specified. flag = %s', flag)
+
+ LOG.debug('Separate command completed. MV = %(mv)s RV = %(rv)s.',
+ {'mv': mvname, 'rv': rvname})
+
+ def query_MV_RV_status(self, ldname, rpltype):
+ if rpltype == 'MV':
+ cmd = ('iSMrc_query -mv %s -mvflg ld | '
+ 'while builtin read line;'
+ 'do if [[ "$line" =~ "Sync State" ]]; '
+ 'then builtin echo ${line:10};fi;'
+ 'done' % ldname)
+ out, err, status = self._execute(cmd)
+ elif rpltype == 'RV':
+ cmd = ('iSMrc_query -rv %s -rvflg ld | '
+ 'while builtin read line;'
+ 'do if [[ "$line" =~ "Sync State" ]]; '
+ 'then builtin echo ${line:10};fi;'
+ 'done' % ldname)
+ out, err, status = self._execute(cmd)
+ else:
+ LOG.debug('Invalid rpltype specified. rpltype = %s', rpltype)
+ return None
+
+ query_status = out.strip()
+ return query_status
+
+ def query_MV_RV_name(self, ldname, rpltype):
+ if rpltype == 'MV':
+ cmd = ('iSMrc_query -mv %s -mvflg ld | '
+ 'while builtin read line;'
+ 'do if [[ "$line" =~ "LD Name" ]]; '
+ 'then builtin echo ${line:7};fi;'
+ 'done' % ldname)
+ out, err, status = self._execute(cmd)
+ out = out.replace(ldname, "")
+ elif rpltype == 'RV':
+ cmd = ('iSMrc_query -rv %s -rvflg ld | '
+ 'while builtin read line;'
+ 'do if [[ "$line" =~ "LD Name" ]]; '
+ 'then builtin echo ${line:7};fi;'
+ 'done' % ldname)
+ out, err, status = self._execute(cmd)
+ out = out.replace(ldname, "")
+ else:
+ LOG.debug('Invalid rpltype specified. rpltype = %s', rpltype)
+ return None
+
+ query_name = out.strip()
+ return query_name
+
+ def query_MV_RV_diff(self, ldname, rpltype):
+ if rpltype == 'MV':
+ cmd = ('iSMrc_query -mv %s -mvflg ld | '
+ 'while builtin read line;'
+ 'do if [[ "$line" =~ "Separate Diff" ]]; '
+ 'then builtin echo ${line:13};fi;'
+ 'done' % ldname)
+ out, err, status = self._execute(cmd)
+ elif rpltype == 'RV':
+ cmd = ('iSMrc_query -rv %s -rvflg ld | '
+ 'while builtin read line;'
+ 'do if [[ "$line" =~ "Separate Diff" ]]; '
+ 'then builtin echo ${line:13};fi;'
+ 'done' % ldname)
+ out, err, status = self._execute(cmd)
+ else:
+ LOG.debug('Invalid rpltype specified. rpltype = %s', rpltype)
+ return None
+
+ query_status = out.strip()
+ return query_status
+
+ def backup_restore(self, volume_properties, unpairWait, canPairing=True):
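+ # Pair the MV/RV (unless already paired), replicate, separate, then
+ # hand completion off to the given unpairWait worker.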
+
+ # Setting Pair.
+ flag = 'full'
+ if canPairing is True:
+ self.setpair(volume_properties['mvname'][3:],
+ volume_properties['rvname'][3:])
+ else:
+ rv_diff = self.query_MV_RV_diff(volume_properties['rvname'][3:],
+ 'RV')
+ rv_diff = int(rv_diff.replace('KB', ''), 10) // units.Ki
+ if rv_diff != volume_properties['capacity']:
+ flag = None
+
+ # Replicate.
+ self.replicate(volume_properties['mvname'][3:],
+ volume_properties['rvname'][3:], flag)
+
+ # Separate.
+ self.separate(volume_properties['mvname'][3:],
+ volume_properties['rvname'][3:],
+ volume_properties['flag'])
+
+ unpairProc = unpairWait(volume_properties, self)
+ unpairProc.run()
+
+ def check_ld_existed_rplstatus(self, lds, ldname, snapshot, flag):
+
+ if ldname not in lds:
+ if flag == 'backup':
+ LOG.debug('Volume Id not found. '
+ 'LD name = %(name)s volume_id = %(id)s.',
+ {'name': ldname, 'id': snapshot['volume_id']})
+ raise exception.NotFound(_('Logical Disk does not exist.'))
+ elif flag == 'restore':
+ LOG.debug('Snapshot Id not found. '
+ 'LD name = %(name)s snapshot_id = %(id)s.',
+ {'name': ldname, 'id': snapshot['id']})
+ raise exception.NotFound(_('Logical Disk does not exist.'))
+ elif flag == 'delete':
+ LOG.debug('LD `%(name)s` already unbound? '
+ 'snapshot_id = %(id)s.',
+ {'name': ldname, 'id': snapshot['id']})
+ return None
+ else:
+ LOG.debug('Invalid check_ld_existed_rplstatus flag. flag = %s.',
+ flag)
+ raise exception.NotFound(_('Logical Disk does not exist.'))
+
+ ld = lds[ldname]
+
+ if ld['RPL Attribute'] == 'IV':
+ pass
+ elif ld['RPL Attribute'] == 'MV':
+ query_status = self.query_MV_RV_status(ldname[3:], 'MV')
+ LOG.debug('query_status : %s.', query_status)
+ if query_status == 'separated':
+ # unpair.
+ rvname = self.query_MV_RV_name(ldname[3:], 'MV')
+ self.unpair(ldname[3:], rvname, 'force')
+ else:
+ msg = _('Specified Logical Disk %s has been copied.') % ldname
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ elif ld['RPL Attribute'] == 'RV':
+ query_status = self.query_MV_RV_status(ldname[3:], 'RV')
+ if query_status == 'separated':
+ # unpair.
+ mvname = self.query_MV_RV_name(ldname[3:], 'RV')
+ self.unpair(mvname, ldname[3:], 'force')
+ else:
+ msg = _('Specified Logical Disk %s has been copied.') % ldname
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ return ld
+
+ def get_pair_lds(self, ldname, lds):
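+ # Collect every RV currently paired with this MV, keyed by LDN.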
+ query_status = self.query_MV_RV_name(ldname[3:], 'MV')
+ query_status = query_status.split('\n')
+ query_status = [query for query in query_status if query != '']
+ LOG.debug('query_status=%s.', query_status)
+
+ pair_lds = {}
+ for rvname in query_status:
+ rvname = self._properties['ld_backupname_format'] % rvname
+ if rvname not in lds:
+ LOG.debug('LD `%s` not found; it may be an RDR pair.', rvname)
+ else:
+ ld = lds[rvname]
+ ldn = ld['ldn']
+ pair_lds[ldn] = ld
+
+ LOG.debug('pair_lds=%s.', pair_lds)
+ return pair_lds
+
+ def snapshot_create(self, bvname, svname, poolnumber):
+ """Snapshot create."""
+ cmd = ('iSMcfg generationadd -bvname %(bvname)s '
+ '-poolnumber %(poolnumber)d -count 1 '
+ '-svname %(svname)s'
+ % {'bvname': bvname,
+ 'poolnumber': poolnumber,
+ 'svname': svname})
+ self._execute(cmd)
+
+ cmd = ('iSMsc_create -bv %(bv)s -bvflg ld -sv %(sv)s '
+ '-svflg ld'
+ % {'bv': bvname[3:], 'sv': svname})
+ self._execute(cmd)
+
+ def snapshot_delete(self, bvname, svname):
+ """Snapshot delete."""
+ query_status = self.query_BV_SV_status(bvname[3:], svname)
+ if query_status == 'snap/active':
+ cmd = ('iSMsc_delete -bv %(bv)s -bvflg ld -sv %(sv)s '
+ '-svflg ld'
+ % {'bv': bvname[3:], 'sv': svname})
+ self._execute(cmd)
+
+ while True:
+ query_status = self.query_BV_SV_status(bvname[3:], svname)
+ if query_status == 'snap/deleting':
+ LOG.debug('Sleeping 1 second.')
+ time.sleep(1)
+ else:
+ break
+ else:
+ LOG.debug('The snapshot data no longer exists; it was'
+ ' already forcibly deleted.'
+ ' bvname=%(bvname)s, svname=%(svname)s',
+ {'bvname': bvname, 'svname': svname})
+
+ cmd = 'iSMcfg generationdel -bvname %s -count 1' % bvname
+ self._execute(cmd)
+
+ def query_BV_SV_status(self, bvname, svname):
+ cmd = ('iSMsc_query -bv %(bv)s -bvflg ld -sv %(sv)s -svflg ld '
+ '-summary | '
+ 'while builtin read line;do '
+ 'if [[ "$line" =~ "%(line)s" ]]; '
+ 'then builtin echo "$line";fi;done'
+ % {'bv': bvname, 'sv': svname, 'line': svname})
+ out, err, status = self._execute(cmd)
+
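+ # The SV state (e.g. 'snap/active') occupies a fixed-width column
+ # of the summary line.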
+ query_status = out[34:48].strip()
+ LOG.debug('snap/state:%s.', query_status)
+ return query_status
+
+ def set_io_limit(self, ldname, specs, force_delete=True):
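+ # Apply upper/lower IOPS limits and the upper-report flag in one
+ # iSMioc call; on failure, optionally roll back by unbinding the LD.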
+ if specs['upperlimit'] is not None:
+ upper = int(specs['upperlimit'], 10)
+ else:
+ upper = None
+
+ if specs['lowerlimit'] is not None:
+ lower = int(specs['lowerlimit'], 10)
+ else:
+ lower = None
+
+ report = specs['upperreport']
+ if upper is None and lower is None and report is None:
+ return
+ cmd = 'iSMioc setlimit -ldname %s' % ldname
+ if upper is not None:
+ cmd += ' -upperlimit %d' % upper
+ if lower is not None:
+ cmd += ' -lowerlimit %d' % lower
+ if report is not None:
+ cmd += ' -upperreport %s' % report
+ try:
+ self._execute(cmd)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ if force_delete:
+ self.unbind(ldname)
+
+
+class UnpairWait(object):
+ error_updates = {'status': 'error',
+ 'progress': '100%',
+ 'migration_status': None}
+
+ def __init__(self, volume_properties, cli):
+ super(UnpairWait, self).__init__()
+ self._volume_properties = volume_properties
+ self._mvname = volume_properties['mvname'][3:]
+ self._rvname = volume_properties['rvname'][3:]
+ self._mvID = volume_properties['mvid']
+ self._rvID = volume_properties['rvid']
+ self._flag = volume_properties['flag']
+ self._context = volume_properties['context']
+ self._cli = cli
+ self._local_conf = self._cli._properties
+
+ def _wait(self, unpair=True):
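+ # Poll the RV's sync state every 60 seconds until it separates
+ # (optionally unpairing it) or the configured timeout elapses.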
+ timeout = self._local_conf['thread_timeout'] * 24
+ start_time = time.time()
+ while True:
+ cur_time = time.time()
+ if (cur_time - start_time) > timeout:
+ raise exception.APITimeout(_('UnpairWait wait timeout.'))
+
+ LOG.debug('Sleeping 60 seconds.')
+ time.sleep(60)
+
+ query_status = self._cli.query_MV_RV_status(self._rvname, 'RV')
+ if query_status == 'separated':
+ if unpair is True:
+ self._cli.unpair(self._mvname, self._rvname, 'normal')
+ break
+ elif query_status == 'sep/exec':
+ continue
+ else:
+ LOG.debug('iSMrc_query command result abnormal. '
+ 'Query status = %(status)s, RV = %(rv)s.',
+ {'status': query_status, 'rv': self._rvname})
+ break
+
+ def run(self):
+ try:
+ self._execute()
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.debug('UnpairWait Unexpected error. '
+ 'exception=%(exception)s, MV = %(mv)s, RV = %(rv)s.',
+ {'exception': traceback.format_exc(),
+ 'mv': self._mvname, 'rv': self._rvname})
+
+ def _execute(self):
+ pass
+
+
+class UnpairWaitForBackup(UnpairWait):
+ def __init__(self, volume_properties, cli):
+ super(UnpairWaitForBackup, self).__init__(volume_properties, cli)
+
+ def _execute(self):
+ LOG.debug('UnpairWaitForBackup start.')
+
+ self._wait(True)
+
+
+class UnpairWaitForRestore(UnpairWait):
+ def __init__(self, volume_properties, cli):
+ super(UnpairWaitForRestore, self).__init__(volume_properties, cli)
+
+ self._rvldn = None
+ if ('rvldn' in volume_properties and
+ volume_properties['rvldn'] is not None):
+ self._rvldn = volume_properties['rvldn']
+
+ self._rvcapacity = None
+ if ('rvcapacity' in volume_properties and
+ volume_properties['rvcapacity'] is not None):
+ self._rvcapacity = volume_properties['rvcapacity']
+
+ def _execute(self):
+ LOG.debug('UnpairWaitForRestore start.')
+
+ self._wait(True)
+
+ if self._rvcapacity is not None:
+ try:
+ self._cli.expand(self._rvldn, self._rvcapacity)
+ except exception.CinderException:
+ with excutils.save_and_reraise_exception():
+ LOG.debug('UnpairWaitForRestore expand error. '
+ 'exception=%(exception)s, '
+ 'MV = %(mv)s, RV = %(rv)s.',
+ {'exception': traceback.format_exc(),
+ 'mv': self._mvname, 'rv': self._rvname})
+
+
+class UnpairWaitForClone(UnpairWait):
+ def __init__(self, volume_properties, cli):
+ super(UnpairWaitForClone, self).__init__(volume_properties, cli)
+
+ self._rvldn = None
+ if ('rvldn' in volume_properties and
+ volume_properties['rvldn'] is not None):
+ self._rvldn = volume_properties['rvldn']
+
+ self._rvcapacity = None
+ if ('rvcapacity' in volume_properties and
+ volume_properties['rvcapacity'] is not None):
+ self._rvcapacity = volume_properties['rvcapacity']
+
+ def _execute(self):
+ LOG.debug('UnpairWaitForClone start.')
+
+ self._wait(True)
+
+ if self._rvcapacity is not None:
+ try:
+ self._cli.expand(self._rvldn, self._rvcapacity)
+ except exception.CinderException:
+ with excutils.save_and_reraise_exception():
+ LOG.debug('UnpairWaitForClone expand error. '
+ 'exception=%(exception)s, '
+ 'MV = %(mv)s, RV = %(rv)s.',
+ {'exception': traceback.format_exc(),
+ 'mv': self._mvname, 'rv': self._rvname})
+
+
+class UnpairWaitForMigrate(UnpairWait):
+ def __init__(self, volume_properties, cli):
+ super(UnpairWaitForMigrate, self).__init__(volume_properties, cli)
+
+ def _execute(self):
+ LOG.debug('UnpairWaitForMigrate start.')
+
+ self._wait(True)
+
+ self._cli.unbind(self._volume_properties['mvname'])
+ self._cli.changeldname(None, self._volume_properties['mvname'],
+ self._volume_properties['rvname'])
+
+
+class UnpairWaitForDDRBackup(UnpairWaitForBackup):
+ def __init__(self, volume_properties, cli):
+ super(UnpairWaitForDDRBackup, self).__init__(volume_properties, cli)
+
+ def _execute(self):
+ LOG.debug('UnpairWaitForDDRBackup start.')
+
+ self._wait(False)
+
+
+class UnpairWaitForDDRRestore(UnpairWaitForRestore):
+ def __init__(self, volume_properties, cli):
+ super(UnpairWaitForDDRRestore, self).__init__(volume_properties, cli)
+
+ self._prev_mvname = None
+ if ('prev_mvname' in volume_properties and
+ volume_properties['prev_mvname'] is not None):
+ self._prev_mvname = volume_properties['prev_mvname'][3:]
+
+ def _execute(self):
+ LOG.debug('UnpairWaitForDDRRestore start.')
+
+ self._wait(True)
+
+ if self._rvcapacity is not None:
+ try:
+ self._cli.expand(self._rvldn, self._rvcapacity)
+ except exception.CinderException:
+ with excutils.save_and_reraise_exception():
+ LOG.debug('UnpairWaitForDDRRestore expand error. '
+ 'exception=%(exception)s, '
+ 'MV = %(mv)s, RV = %(rv)s.',
+ {'exception': traceback.format_exc(),
+ 'mv': self._mvname, 'rv': self._rvname})
+
+ if self._prev_mvname is not None:
+ self._cli.setpair(self._prev_mvname, self._mvname)
diff --git a/cinder/volume/drivers/nec/product.xml b/cinder/volume/drivers/nec/product.xml
new file mode 100644
index 00000000000..6bbbb1b760e
--- /dev/null
+++ b/cinder/volume/drivers/nec/product.xml
@@ -0,0 +1,27 @@
+
+
+ NEC
+
+ 8192
+ 8192
+ 4096
+ 4096
+ 1024
+ 1024
+ 8192
+ 8192
+ 4096
+ 4096
+ 1024
+ 1024
+ 8192
+ 8192
+ 8192
+ 4096
+ 4096
+ 4096
+ 4096
+ 1024
+ 1024
+
+
diff --git a/cinder/volume/drivers/nec/volume.py b/cinder/volume/drivers/nec/volume.py
new file mode 100644
index 00000000000..7da180f8b77
--- /dev/null
+++ b/cinder/volume/drivers/nec/volume.py
@@ -0,0 +1,76 @@
+#
+# Copyright (c) 2016 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Drivers for M-Series Storage."""
+
+from cinder import interface
+from cinder.volume import driver
+from cinder.volume.drivers.nec import volume_helper
+from cinder.zonemanager import utils as fczm_utils
+
+
+@interface.volumedriver
+class MStorageISCSIDriver(volume_helper.MStorageDSVDriver,
+ driver.ISCSIDriver):
+ """M-Series Storage Snapshot iSCSI Driver."""
+
+ def __init__(self, *args, **kwargs):
+ super(MStorageISCSIDriver, self).__init__(*args, **kwargs)
+ self._set_config(self.configuration, self.host,
+ self.__class__.__name__)
+
+ def create_export(self, context, volume, connector):
+ return self.iscsi_do_export(context, volume, connector)
+
+ def ensure_export(self, context, volume):
+ pass
+
+ def get_volume_stats(self, refresh=False):
+ return self.iscsi_get_volume_stats(refresh)
+
+ def initialize_connection(self, volume, connector):
+ return self.iscsi_initialize_connection(volume, connector)
+
+ def terminate_connection(self, volume, connector, **kwargs):
+ return self.iscsi_terminate_connection(volume, connector)
+
+
+@interface.volumedriver
+class MStorageFCDriver(volume_helper.MStorageDSVDriver,
+ driver.FibreChannelDriver):
+ """M-Series Storage Snapshot FC Driver."""
+
+ def __init__(self, *args, **kwargs):
+ super(MStorageFCDriver, self).__init__(*args, **kwargs)
+ self._set_config(self.configuration, self.host,
+ self.__class__.__name__)
+
+ def create_export(self, context, volume, connector):
+ return self.fc_do_export(context, volume, connector)
+
+ def ensure_export(self, context, volume):
+ pass
+
+ def get_volume_stats(self, refresh=False):
+ return self.fc_get_volume_stats(refresh)
+
+ @fczm_utils.AddFCZone
+ def initialize_connection(self, volume, connector):
+ return self.fc_initialize_connection(volume, connector)
+
+ @fczm_utils.RemoveFCZone
+ def terminate_connection(self, volume, connector, **kwargs):
+ return self.fc_terminate_connection(volume, connector)
diff --git a/cinder/volume/drivers/nec/volume_common.py b/cinder/volume/drivers/nec/volume_common.py
new file mode 100644
index 00000000000..9e5c82405e6
--- /dev/null
+++ b/cinder/volume/drivers/nec/volume_common.py
@@ -0,0 +1,922 @@
+#
+# Copyright (c) 2016 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+from lxml import etree
+import os
+import re
+import six
+import traceback
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+from oslo_utils import units
+
+from cinder import context
+from cinder import exception
+from cinder.i18n import _
+from cinder.volume.drivers.nec import cli
+from cinder.volume.drivers.san import san
+from cinder.volume import qos_specs
+from cinder.volume import volume_types
+
+
+LOG = logging.getLogger(__name__)
+
+FLAGS = cfg.CONF
+
+mstorage_opts = [
+ cfg.IPOpt('nec_ismcli_fip',
+ default=None,
+ help='FIP address of M-Series Storage iSMCLI.'),
+ cfg.StrOpt('nec_ismcli_user',
+ default='',
+ help='User name for M-Series Storage iSMCLI.'),
+ cfg.StrOpt('nec_ismcli_password',
+ secret=True,
+ default='',
+ help='Password for M-Series Storage iSMCLI.'),
+ cfg.StrOpt('nec_ismcli_privkey',
+ default='',
+ help='Filename of RSA private key for '
+ 'M-Series Storage iSMCLI.'),
+ cfg.StrOpt('nec_ldset',
+ default='',
+ help='M-Series Storage LD Set name for Compute Node.'),
+ cfg.StrOpt('nec_ldname_format',
+ default='LX:%s',
+ help='M-Series Storage LD name format for volumes.'),
+ cfg.StrOpt('nec_backup_ldname_format',
+ default='LX:%s',
+ help='M-Series Storage LD name format for snapshots.'),
+ cfg.StrOpt('nec_diskarray_name',
+ default='',
+ help='Diskarray name of M-Series Storage.'),
+ cfg.StrOpt('nec_ismview_dir',
+ default='/tmp/nec/cinder',
+ help='Output path of iSMview file.'),
+ cfg.StrOpt('nec_ldset_for_controller_node',
+ default='',
+ help='M-Series Storage LD Set name for Controller Node.'),
+ cfg.IntOpt('nec_ssh_pool_port_number',
+ default=22,
+ help='Port number of ssh pool.'),
+ cfg.IntOpt('nec_unpairthread_timeout',
+ default=3600,
+ help='Timeout value of Unpairthread.'),
+ cfg.IntOpt('nec_backend_max_ld_count',
+ default=1024,
+ help='Maximum number of managing sessions.'),
+ cfg.BoolOpt('nec_actual_free_capacity',
+ default=False,
+ help='Return actual free capacity.'),
+ cfg.BoolOpt('nec_ismview_alloptimize',
+ default=False,
+ help='Use legacy iSMCLI command with optimization.'),
+ cfg.ListOpt('nec_pools',
+ default=[],
+ help='M-Series Storage pool numbers list to be used.'),
+ cfg.ListOpt('nec_backup_pools',
+ default=[],
+ help='M-Series Storage backup pool number to be used.'),
+ cfg.BoolOpt('nec_queryconfig_view',
+ default=False,
+ help='Use legacy iSMCLI command.'),
+ cfg.IntOpt('nec_iscsi_portals_per_cont',
+ default=1,
+ help='Number of iSCSI portals.'),
+]
+
+FLAGS.register_opts(mstorage_opts)
+
+
+def convert_to_name(uuid):
+ alnum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+    num = int(uuid.replace("-", ""), 16)
+
+ convertname = ""
+ while num != 0:
+ convertname = alnum[num % len(alnum)] + convertname
+ num = num - num % len(alnum)
+ num = num // len(alnum)
+ return convertname
+
+
+def convert_to_id(value62):
+ alnum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ length = len(value62)
+
+ weight = 0
+ value = 0
+ index = 0
+ for i in reversed(range(0, length)):
+ num = alnum.find(value62[i])
+ if index != 0:
+ value += int(weight * (num))
+ else:
+ value = num
+ index += 1
+ weight = 62 ** index
+
+ value = '%032x' % value
+
+ uuid = value[0:8]
+ uuid += '-'
+ uuid += value[8:12]
+ uuid += '-'
+ uuid += value[12:16]
+ uuid += '-'
+ uuid += value[16:20]
+ uuid += '-'
+ uuid += value[20:]
+
+ return uuid
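+
+# Worked example for the pair above (illustrative): the UUID
+# '00000000-0000-0000-0000-00000000003e' has integer value 62, so
+# convert_to_name() returns '10' (base-62 digits '1', '0'), and
+# convert_to_id('10') maps back to the same UUID.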
+
+
+class MStorageVolumeCommon(object):
+ """M-Series Storage volume common class."""
+
+ def __init__(self, configuration, host, driver_name):
+ super(MStorageVolumeCommon, self).__init__()
+
+ self._host = host
+ self._driver_name = driver_name
+
+ self._configuration = configuration
+ self._configuration.append_config_values(mstorage_opts)
+ self._configuration.append_config_values(san.san_opts)
+ self._config_group = self._configuration.config_group
+
+ if self._config_group:
+ FLAGS.register_opts(mstorage_opts, group=self._config_group)
+ self._local_conf = FLAGS._get(self._config_group)
+ else:
+ FLAGS.register_opts(mstorage_opts)
+ self._local_conf = FLAGS
+
+ self._check_flags()
+ self._properties = self._set_properties()
+ self._cli = self._properties['cli']
+
+ def set_context(self, context):
+ self._context = context
+
+ def _check_flags(self):
+ for flag in ['nec_ismcli_fip', 'nec_ismcli_user']:
+ if getattr(self._local_conf, flag, '') == '':
+ raise exception.ParameterNotFound(param=flag)
+ if (getattr(self._local_conf, 'nec_ismcli_password', '') == '' and
+ getattr(self._local_conf, 'nec_ismcli_privkey', '') == ''):
+            msg = _('nec_ismcli_password or nec_ismcli_privkey')
+ raise exception.ParameterNotFound(param=msg)
+
+ def _create_ismview_dir(self,
+ ismview_dir,
+ diskarray_name,
+ driver_name,
+ host):
+ """Create ismview directory."""
+ filename = diskarray_name
+ if filename == '':
+ filename = driver_name + '_' + host
+
+ ismview_path = os.path.join(ismview_dir, filename)
+ LOG.debug('ismview_path=%s.', ismview_path)
+ try:
+ if os.path.exists(ismview_path):
+ os.remove(ismview_path)
+ except OSError as e:
+ with excutils.save_and_reraise_exception() as ctxt:
+ if e.errno == errno.ENOENT:
+ ctxt.reraise = False
+
+ try:
+ os.makedirs(ismview_dir)
+ except OSError as e:
+ with excutils.save_and_reraise_exception() as ctxt:
+ if e.errno == errno.EEXIST:
+ ctxt.reraise = False
+
+ return ismview_path
+
+ def get_conf(self, host):
+ """Get another host group configurations."""
+ hostname = host['host']
+ hostname = hostname[:hostname.rindex('#')]
+ if '@' in hostname:
+ group = hostname.split('@')[1]
+ FLAGS.register_opts(mstorage_opts, group=group)
+ conf = FLAGS._get(group)
+ else:
+ FLAGS.register_opts(mstorage_opts)
+ conf = FLAGS
+ return conf
+
+ def get_conf_properties(self, conf=None):
+ if conf is None:
+ return self._properties
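+        # Pool numbers may be written in decimal or, with a trailing
+        # 'h', in hex: e.g. nec_pools = ['0', '1', '0ah'] -> [0, 1, 10].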
+ pool_pools = []
+ for pool in getattr(conf, 'nec_pools', []):
+ if pool.endswith('h'):
+ pool_pools.append(int(pool[:-1], 16))
+ else:
+ pool_pools.append(int(pool, 10))
+ pool_backup_pools = []
+ for pool in getattr(conf, 'nec_backup_pools', []):
+ if pool.endswith('h'):
+ pool_backup_pools.append(int(pool[:-1], 16))
+ else:
+ pool_backup_pools.append(int(pool, 10))
+ ldset_name = getattr(conf, 'nec_ldset', '')
+ ldset_controller_node_name = getattr(conf,
+ 'nec_ldset_for_controller_node',
+ '')
+
+ return {
+ 'cli_fip': conf.nec_ismcli_fip,
+ 'cli_user': conf.nec_ismcli_user,
+ 'cli_password': conf.nec_ismcli_password,
+ 'cli_privkey': conf.nec_ismcli_privkey,
+ 'pool_pools': pool_pools,
+ 'pool_backup_pools': pool_backup_pools,
+ 'pool_actual_free_capacity': conf.nec_actual_free_capacity,
+ 'ldset_name': ldset_name,
+ 'ldset_controller_node_name': ldset_controller_node_name,
+ 'ld_name_format': conf.nec_ldname_format,
+ 'ld_backupname_format': conf.nec_backup_ldname_format,
+ 'ld_backend_max_count': conf.nec_backend_max_ld_count,
+ 'thread_timeout': conf.nec_unpairthread_timeout,
+ 'ismview_dir': conf.nec_ismview_dir,
+ 'ismview_alloptimize': conf.nec_ismview_alloptimize,
+ 'ssh_conn_timeout': conf.ssh_conn_timeout,
+ 'ssh_pool_port_number': conf.nec_ssh_pool_port_number,
+ 'diskarray_name': conf.nec_diskarray_name,
+ 'queryconfig_view': conf.nec_queryconfig_view,
+ 'portal_number': conf.nec_iscsi_portals_per_cont,
+ 'reserved_percentage': conf.reserved_percentage
+ }
+
+ def _set_properties(self):
+ conf_properties = self.get_conf_properties(self._local_conf)
+
+ ismview_path = self._create_ismview_dir(
+ self._local_conf.nec_ismview_dir,
+ self._local_conf.nec_diskarray_name,
+ self._driver_name,
+ self._host)
+
+ vendor_name, _product_dict = self.get_oem_parameter()
+
+ backend_name = self._configuration.safe_get('volume_backend_name')
+
+ conf_properties['ismview_path'] = ismview_path
+ conf_properties['driver_name'] = self._driver_name
+ conf_properties['config_group'] = self._config_group
+ conf_properties['configuration'] = self._configuration
+ conf_properties['vendor_name'] = vendor_name
+ conf_properties['products'] = _product_dict
+ conf_properties['backend_name'] = backend_name
+ conf_properties['cli'] = cli.MStorageISMCLI(conf_properties)
+
+ return conf_properties
+
+ def get_oem_parameter(self):
+ product = os.path.join(os.path.dirname(__file__), 'product.xml')
+ try:
+ with open(product, 'r') as f:
+ xml = f.read()
+ root = etree.fromstring(xml)
+ vendor_name = root.xpath('./VendorName')[0].text
+
+ product_dict = {}
+ product_map = root.xpath('./ProductMap/Product')
+ for s in product_map:
+ product_dict[s.attrib['Name']] = int(s.text, 10)
+
+ return vendor_name, product_dict
+ except OSError as e:
+ with excutils.save_and_reraise_exception() as ctxt:
+ if e.errno == errno.ENOENT:
+ ctxt.reraise = False
+ raise exception.NotFound(_('%s not found.') % product)
+
+ @staticmethod
+ def get_ldname(volid, volformat):
+ alnum = ('0123456789'
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz')
+ ldname = ""
+        num = int(volid.replace("-", ""), 16)
+ while num != 0:
+ ldname = alnum[num % len(alnum)] + ldname
+ num = num - num % len(alnum)
+ num = num // len(alnum)
+
+ return volformat % ldname
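+
+    # With the default nec_ldname_format 'LX:%s', the example UUID
+    # '00000000-0000-0000-0000-00000000003e' yields LD name 'LX:10'.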
+
+ def get_ldset(self, ldsets, metadata=None):
+ ldset = None
+ if metadata is not None and 'ldset' in metadata:
+ ldset_meta = metadata['ldset']
+ LOG.debug('ldset(metadata)=%s.', ldset_meta)
+ for tldset in six.itervalues(ldsets):
+ if tldset['ldsetname'] == ldset_meta:
+ ldset = ldsets[ldset_meta]
+ LOG.debug('ldset information(metadata specified)=%s.',
+ ldset)
+ break
+ if ldset is None:
+ msg = _('Logical Disk Set could not be found.')
+ LOG.error(msg)
+ raise exception.NotFound(msg)
+ elif self._properties['ldset_name'] == '':
+ nldset = len(ldsets)
+ if nldset == 0:
+ msg = _('Logical Disk Set could not be found.')
+ raise exception.NotFound(msg)
+ else:
+ ldset = None
+ else:
+ if self._properties['ldset_name'] not in ldsets:
+ msg = (_('Logical Disk Set `%s` could not be found.') %
+ self._properties['ldset_name'])
+ raise exception.NotFound(msg)
+ ldset = ldsets[self._properties['ldset_name']]
+ return ldset
+
+ def get_pool_capacity(self, pools, ldsets):
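+        """Report capacity of the pool with the largest free space.
+
+        Returns total/free capacity in GB for the single eligible pool
+        that currently has the most free space (not a sum over pools).
+        """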
+ pools = [pool for (pn, pool) in six.iteritems(pools)
+ if len(self._properties['pool_pools']) == 0 or
+ pn in self._properties['pool_pools']]
+
+ free_capacity_gb = 0
+ total_capacity_gb = 0
+ for pool in pools:
+ # Convert to GB.
+ tmp_total = int(pool['total'] // units.Gi)
+ tmp_free = int(pool['free'] // units.Gi)
+
+ if free_capacity_gb < tmp_free:
+ total_capacity_gb = tmp_total
+ free_capacity_gb = tmp_free
+
+ return {'total_capacity_gb': total_capacity_gb,
+ 'free_capacity_gb': free_capacity_gb}
+
+ def set_backend_max_ld_count(self, xml, root):
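+        """Clamp ld_backend_max_count according to the iSM version.
+
+        The limit is capped at 512 for versions below 9.1 and at 1024
+        otherwise.
+        """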
+ section = root.xpath('./CMD_REQUEST')[0]
+ version = section.get('version').replace('Version ', '')[0:3]
+ version = float(version)
+ if version < 9.1:
+ if 512 < self._properties['ld_backend_max_count']:
+ self._properties['ld_backend_max_count'] = 512
+ else:
+ if 1024 < self._properties['ld_backend_max_count']:
+ self._properties['ld_backend_max_count'] = 1024
+
+ def get_diskarray_max_ld_count(self, xml, root):
+ max_ld_count = 0
+ for section in root.xpath(
+ './'
+ 'CMD_REQUEST/'
+ 'CHAPTER[@name="Disk Array"]/'
+ 'OBJECT[@name="Disk Array"]/'
+ 'SECTION[@name="Disk Array Detail Information"]'):
+ unit = section.find('./UNIT[@name="Product ID"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Product ID"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ else:
+ product_id = unit.text
+ if product_id in self._properties['products']:
+ max_ld_count = self._properties['products'][product_id]
+ else:
+ max_ld_count = 8192
+ LOG.debug('UNIT[@name="Product ID"] unknown id. '
+ 'productId=%s', product_id)
+ LOG.debug('UNIT[@name="Product ID"] max_ld_count=%d.',
+ max_ld_count)
+ return max_ld_count
+
+ def get_pool_config(self, xml, root):
+ pools = {}
+ for xmlobj in root.xpath('./'
+ 'CMD_REQUEST/'
+ 'CHAPTER[@name="Pool"]/'
+ 'OBJECT[@name="Pool"]'):
+ section = xmlobj.find('./SECTION[@name="Pool Detail Information"]')
+ if section is None:
+ msg = (_('SECTION[@name="Pool Detail Information"] '
+ 'not found. line=%(line)d out="%(out)s"') %
+ {'line': xmlobj.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ unit = section.find('./UNIT[@name="Pool No.(h)"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Pool No.(h)"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ pool_num = int(unit.text, 16)
+ unit = section.find('UNIT[@name="Pool Capacity"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Pool Capacity"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ total = int(unit.text, 10)
+ unit = section.find('UNIT[@name="Free Pool Capacity"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Free Pool Capacity"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ free = int(unit.text, 10)
+ if self._properties['pool_actual_free_capacity']:
+ unit = section.find('UNIT[@name="Used Pool Capacity"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Used Pool Capacity"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ used = int(unit.text, 10)
+ for section in xmlobj.xpath('./SECTION[@name='
+ '"Virtual Capacity Pool '
+ 'Information"]'):
+ unit = section.find('UNIT[@name="Actual Capacity"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Actual Capacity"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ total = int(unit.text, 10)
+ free = total - used
+ pool = {'pool_num': pool_num,
+ 'total': total,
+ 'free': free,
+ 'ld_list': []}
+ pools[pool_num] = pool
+ return pools
+
+ def get_ld_config(self, xml, root, pools):
+ lds = {}
+ used_ldns = []
+ for section in root.xpath('./'
+ 'CMD_REQUEST/'
+ 'CHAPTER[@name="Logical Disk"]/'
+ 'OBJECT[@name="Logical Disk"]/'
+ 'SECTION[@name="LD Detail Information"]'):
+ unit = section.find('./UNIT[@name="LDN(h)"]')
+ if unit is None:
+ msg = (_('UNIT[@name="LDN(h)"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ ldn = int(unit.text, 16)
+ unit = section.find('./UNIT[@name="OS Type"]')
+ if unit is None:
+ msg = (_('UNIT[@name="OS Type"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ ostype = unit.text if unit.text is not None else ''
+ unit = section.find('./UNIT[@name="LD Name"]')
+ if unit is None:
+ msg = (_('UNIT[@name="LD Name"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ ldname = ostype + ':' + unit.text
+ unit = section.find('./UNIT[@name="Pool No.(h)"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Pool No.(h)"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ pool_num = int(unit.text, 16)
+
+ unit = section.find('./UNIT[@name="LD Capacity"]')
+ if unit is None:
+ msg = (_('UNIT[@name="LD Capacity"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+            # Convert byte capacity to GB capacity.
+ ld_capacity = int(unit.text, 10) // units.Gi
+
+ unit = section.find('./UNIT[@name="RPL Attribute"]')
+ if unit is None:
+ msg = (_('UNIT[@name="RPL Attribute"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ rplatr = unit.text
+
+ unit = section.find('./UNIT[@name="Purpose"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Purpose"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ purpose = unit.text
+
+ ld = {'ldname': ldname,
+ 'ldn': ldn,
+ 'pool_num': pool_num,
+ 'ld_capacity': ld_capacity,
+ 'RPL Attribute': rplatr,
+ 'Purpose': purpose}
+ pools[pool_num]['ld_list'].append(ld)
+ lds[ldname] = ld
+ used_ldns.append(ldn)
+ return lds, used_ldns
+
+ def get_iscsi_ldset_config(self, xml, root):
+ ldsets = {}
+ for xmlobj in root.xpath('./'
+ 'CMD_REQUEST/'
+ 'CHAPTER[@name="Access Control"]/'
+ 'OBJECT[@name="LD Set(iSCSI)"]'):
+ ldsetlds = {}
+ portals = []
+ initiators = []
+ for unit in xmlobj.xpath('./SECTION[@name="Portal"]/'
+ 'UNIT[@name="Portal"]'):
+ if not unit.text.startswith('0.0.0.0:'):
+ portals.append(unit.text)
+
+ for unit in xmlobj.xpath('./SECTION[@name="Initiator List"]/'
+ 'UNIT[@name="Initiator List"]'):
+ initiators.append(unit.text)
+
+ section = xmlobj.find('./SECTION[@name="LD Set(iSCSI)'
+ ' Information"]')
+ if section is None:
+ return ldsets
+ unit = section.find('./UNIT[@name="Platform"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Platform"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ platform = unit.text
+ unit = section.find('./UNIT[@name="LD Set Name"]')
+ if unit is None:
+ msg = (_('UNIT[@name="LD Set Name"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ ldsetname = platform + ':' + unit.text
+ unit = section.find('./UNIT[@name="Target Mode"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Target Mode"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ tmode = unit.text
+ if tmode == 'Normal':
+ unit = section.find('./UNIT[@name="Target Name"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Target Name"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ iqn = unit.text
+ for section in xmlobj.xpath('./SECTION[@name="LUN/LD List"]'):
+ unit = section.find('./UNIT[@name="LDN(h)"]')
+ if unit is None:
+ msg = (_('UNIT[@name="LDN(h)"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ ldn = int(unit.text, 16)
+ unit = section.find('./UNIT[@name="LUN(h)"]')
+ if unit is None:
+ msg = (_('UNIT[@name="LUN(h)"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ lun = int(unit.text, 16)
+ ld = {'ldn': ldn,
+ 'lun': lun,
+ 'iqn': iqn}
+ ldsetlds[ldn] = ld
+ elif tmode == 'Multi-Target':
+ for section in xmlobj.xpath('./SECTION[@name='
+ '"Target Information For '
+ 'Multi-Target Mode"]'):
+ unit = section.find('./UNIT[@name="Target Name"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Target Name"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ iqn = unit.text
+ unit = section.find('./UNIT[@name="LDN(h)"]')
+ if unit is None:
+ msg = (_('UNIT[@name="LDN(h)"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ if unit.text.startswith('-'):
+ continue
+ ldn = int(unit.text, 16)
+ unit = section.find('./UNIT[@name="LUN(h)"]')
+ if unit is None:
+ msg = (_('UNIT[@name="LUN(h)"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ if unit.text.startswith('-'):
+ continue
+ lun = int(unit.text, 16)
+ ld = {'ldn': ldn,
+ 'lun': lun,
+ 'iqn': iqn}
+ ldsetlds[ldn] = ld
+ else:
+ LOG.debug('`%(mode)s` Unknown Target Mode. '
+ 'line=%(line)d out="%(out)s"',
+ {'mode': tmode, 'line': unit.sourceline, 'out': xml})
+ ldset = {'ldsetname': ldsetname,
+ 'protocol': 'iSCSI',
+ 'portal_list': portals,
+ 'lds': ldsetlds,
+ 'initiator_list': initiators}
+ ldsets[ldsetname] = ldset
+ return ldsets
+
+ def get_fc_ldset_config(self, xml, root):
+ ldsets = {}
+ for xmlobj in root.xpath('./'
+ 'CMD_REQUEST/'
+ 'CHAPTER[@name="Access Control"]/'
+ 'OBJECT[@name="LD Set(FC)"]'):
+ ldsetlds = {}
+ section = xmlobj.find('./SECTION[@name="LD Set(FC)'
+ ' Information"]')
+ if section is None:
+ return ldsets
+ unit = section.find('./UNIT[@name="Platform"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Platform"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ platform = unit.text
+ unit = section.find('./UNIT[@name="LD Set Name"]')
+ if unit is None:
+ msg = (_('UNIT[@name="LD Set Name"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ ldsetname = platform + ':' + unit.text
+ wwpns = []
+ ports = []
+ for section in xmlobj.xpath('./SECTION[@name="Path List"]'):
+ unit = section.find('./UNIT[@name="Path"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Path"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ if unit.text.find('(') != -1:
+ ports.append(unit.text)
+ else:
+ wwpns.append(unit.text)
+ for section in xmlobj.xpath('./SECTION[@name="LUN/LD List"]'):
+ unit = section.find('./UNIT[@name="LDN(h)"]')
+ if unit is None:
+ msg = (_('UNIT[@name="LDN(h)"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ ldn = int(unit.text, 16)
+ unit = section.find('./UNIT[@name="LUN(h)"]')
+ if unit is None:
+ msg = (_('UNIT[@name="LUN(h)"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ lun = int(unit.text, 16)
+ ld = {'ldn': ldn,
+ 'lun': lun}
+ ldsetlds[ldn] = ld
+ ldset = {'ldsetname': ldsetname,
+ 'lds': ldsetlds,
+ 'protocol': 'FC',
+ 'wwpn': wwpns,
+ 'port': ports}
+ ldsets[ldsetname] = ldset
+ return ldsets
+
+ def get_hostport_config(self, xml, root):
+ hostports = {}
+ for section in root.xpath('./'
+ 'CMD_REQUEST/'
+ 'CHAPTER[@name="Controller"]/'
+ 'OBJECT[@name="Host Port"]/'
+ 'SECTION[@name="Host Director'
+ '/Host Port Information"]'):
+ unit = section.find('./UNIT[@name="Port No.(h)"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Port No.(h)"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ units = unit.text.split('-')
+ director = int(units[0], 16)
+ port = int(units[1], 16)
+ unit = section.find('./UNIT[@name="IP Address"]')
+ if unit is None:
+ unit = section.find('./UNIT[@name="WWPN"]')
+ if unit is None:
+ msg = (_('UNIT[@name="WWPN"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ wwpn = unit.text
+ hostport = {
+ 'director': director,
+ 'port': port,
+ 'wwpn': wwpn,
+ 'protocol': 'FC',
+ }
+ else:
+ ip = unit.text
+ if ip == '0.0.0.0':
+ continue
+
+ # Port Link Status check Start.
+ unit = section.find('./UNIT[@name="Link Status"]')
+ if unit is None:
+ msg = (_('UNIT[@name="Link Status"] not found. '
+ 'line=%(line)d out="%(out)s"') %
+ {'line': section.sourceline, 'out': xml})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ linkstatus = unit.text
+ if linkstatus == 'Link Down':
+ continue
+
+ hostport = {
+ 'director': director,
+ 'port': port,
+ 'ip': ip,
+ 'protocol': 'iSCSI',
+ }
+ if director not in hostports:
+ hostports[director] = []
+ hostports[director].append(hostport)
+ return hostports
+
+ def configs(self, xml):
+ root = etree.fromstring(xml)
+ pools = self.get_pool_config(xml, root)
+ lds, used_ldns = self.get_ld_config(xml, root, pools)
+ iscsi_ldsets = self.get_iscsi_ldset_config(xml, root)
+ fc_ldsets = self.get_fc_ldset_config(xml, root)
+ hostports = self.get_hostport_config(xml, root)
+ diskarray_max_ld_count = self.get_diskarray_max_ld_count(xml, root)
+
+ self.set_backend_max_ld_count(xml, root)
+
+ ldsets = {}
+ ldsets.update(iscsi_ldsets)
+ ldsets.update(fc_ldsets)
+
+ return pools, lds, ldsets, used_ldns, hostports, diskarray_max_ld_count
+
+ def get_xml(self):
+ ismview_path = self._properties['ismview_path']
+ if os.path.exists(ismview_path) and os.path.isfile(ismview_path):
+ with open(ismview_path, 'r') as f:
+ xml = f.read()
+ LOG.debug('loaded from %s.', ismview_path)
+ else:
+ xml = self._cli.view_all(ismview_path, False, False)
+ return xml
+
+ def parse_xml(self):
+ try:
+ xml = self.get_xml()
+ return self.configs(xml)
+ except Exception:
+ LOG.debug('parse_xml Unexpected error. exception=%s',
+ traceback.format_exc())
+ xml = self._cli.view_all(self._properties['ismview_path'], False)
+ return self.configs(xml)
+
+ def get_volume_type_qos_specs(self, volume):
+ specs = {}
+
+ ctxt = context.get_admin_context()
+ type_id = volume['volume_type_id']
+ if type_id is not None:
+ volume_type = volume_types.get_volume_type(ctxt, type_id)
+
+ qos_specs_id = volume_type.get('qos_specs_id')
+ if qos_specs_id is not None:
+ specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
+
+            LOG.debug('get_volume_type_qos_specs '
+                      'volume_type=%(volume_type)s, '
+                      'qos_specs_id=%(qos_spec_id)s '
+                      'specs=%(specs)s',
+                      {'volume_type': volume_type,
+                       'qos_spec_id': qos_specs_id,
+                       'specs': specs})
+ return specs
+
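+    # The specs dict uses this driver's QoS keys, e.g. (illustrative)
+    # {'upperlimit': '1000', 'lowerlimit': '100', 'upperreport': 'on'}.
+    # check_io_parameter() fills missing keys with None and rejects
+    # out-of-range limit values.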
+ def check_io_parameter(self, specs):
+ if ('upperlimit' not in specs and
+ 'lowerlimit' not in specs and
+ 'upperreport' not in specs):
+ specs['upperlimit'] = None
+ specs['lowerlimit'] = None
+ specs['upperreport'] = None
+ LOG.debug('qos parameter not found.')
+ else:
+ if ('upperlimit' in specs) and (specs['upperlimit'] is not None):
+ if self.validates_number(specs['upperlimit']) is True:
+ upper_limit = int(specs['upperlimit'], 10)
+ if ((upper_limit != 0) and
+ ((upper_limit < 10) or (upper_limit > 1000000))):
+ raise exception.InvalidConfigurationValue(
+ value=upper_limit, option='upperlimit')
+ else:
+ raise exception.InvalidConfigurationValue(
+ value=specs['upperlimit'], option='upperlimit')
+ else:
+ specs['upperlimit'] = None
+
+ if ('lowerlimit' in specs) and (specs['lowerlimit'] is not None):
+ if self.validates_number(specs['lowerlimit']) is True:
+ lower_limit = int(specs['lowerlimit'], 10)
+ if (lower_limit != 0 and (lower_limit < 10 or
+ lower_limit > 1000000)):
+ raise exception.InvalidConfigurationValue(
+ value=lower_limit, option='lowerlimit')
+ else:
+ raise exception.InvalidConfigurationValue(
+ value=specs['lowerlimit'], option='lowerlimit')
+ else:
+ specs['lowerlimit'] = None
+
+ if 'upperreport' in specs:
+ if specs['upperreport'] not in ['on', 'off']:
+                LOG.debug('Illegal arguments. '
+                          'upperreport is not on or off. '
+                          'upperreport=%s', specs['upperreport'])
+ specs['upperreport'] = None
+ else:
+ specs['upperreport'] = None
+
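+    # validates_number() accepts integer or decimal strings such as
+    # '10', '+300', '10.5'; it rejects signed zero ('-0', '+0') and
+    # non-numeric strings.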
+ def validates_number(self, value):
+ return re.match(r'^(?![-+]0+$)[-+]?([1-9][0-9]*)?[0-9](\.[0-9]+)?$',
+ '%s' % value) and True or False
diff --git a/cinder/volume/drivers/nec/volume_helper.py b/cinder/volume/drivers/nec/volume_helper.py
new file mode 100644
index 00000000000..d2d6f956bdc
--- /dev/null
+++ b/cinder/volume/drivers/nec/volume_helper.py
@@ -0,0 +1,1699 @@
+#
+# Copyright (c) 2016 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import random
+import six
+import traceback
+
+from oslo_log import log as logging
+from oslo_utils import excutils
+from oslo_utils import units
+
+from cinder import coordination
+from cinder import exception
+from cinder import volume
+from cinder.i18n import _, _LE, _LI, _LW
+from cinder.volume.drivers.nec import cli
+from cinder.volume.drivers.nec import volume_common
+
+
+LOG = logging.getLogger(__name__)
+
+
+class MStorageDriver(object):
+ """M-Series Storage helper class."""
+
+ VERSION = '1.8.1'
+ WIKI_NAME = 'NEC_Cinder_CI'
+
+ def _set_config(self, configuration, host, driver_name):
+ self._configuration = configuration
+ self._host = host
+ self._driver_name = driver_name
+ self._common = volume_common.MStorageVolumeCommon(configuration,
+ host,
+ driver_name)
+ self._properties = self._common.get_conf_properties()
+ self._cli = self._properties['cli']
+ self._volume_api = volume.API()
+ self._numofld_per_pool = 1024
+
+ def do_setup(self, context):
+ self._context = context
+ self._common.set_context(self._context)
+
+ def check_for_setup_error(self):
+ if len(getattr(self._common._local_conf, 'nec_pools', [])) == 0:
+ raise exception.ParameterNotFound(param='nec_pools')
+
+ def _convert_id2name(self, volume):
+ ldname = (self._common.get_ldname(volume['id'],
+ self._properties['ld_name_format']))
+ return ldname
+
+ def _convert_id2snapname(self, volume):
+ ldname = (self._common.get_ldname(volume['id'],
+ self._properties[
+ 'ld_backupname_format']))
+ return ldname
+
+ def _convert_id2migratename(self, volume):
+ ldname = self._convert_id2name(volume)
+ ldname = ldname + '_m'
+ return ldname
+
+ def _convert_id2name_in_migrate(self, volume):
+ """If LD has migrate_status, get LD name from source LD UUID."""
+ LOG.debug('migration_status:%s', volume['migration_status'])
+ migstat = volume['migration_status']
+ if migstat is not None and 'target:' in migstat:
+ index = migstat.find('target:')
+ if index != -1:
+ migstat = migstat[len('target:'):]
+ ldname = (self._common.get_ldname(migstat,
+ self._properties[
+ 'ld_name_format']))
+ else:
+ ldname = (self._common.get_ldname(volume['id'],
+ self._properties[
+ 'ld_name_format']))
+
+ LOG.debug('ldname=%s.', ldname)
+ return ldname
+
+ def _select_ldnumber(self, used_ldns, max_ld_count):
+ """Pick up unused LDN."""
+ for ldn in range(0, max_ld_count + 1):
+ if ldn not in used_ldns:
+ break
+ if ldn > max_ld_count - 1:
+ msg = _('All Logical Disk Numbers are used. '
+ 'No more volumes can be created.')
+ raise exception.VolumeBackendAPIException(data=msg)
+ return ldn
+
+ def _return_poolnumber(self, nominated_pools):
+ """Select pool form nominated pools."""
+ selected_pool = -1
+ min_ldn = 0
+ for pool in nominated_pools:
+ nld = len(pool['ld_list'])
+ if (nld < self._numofld_per_pool and
+ ((selected_pool == -1) or (min_ldn > nld))):
+ selected_pool = pool['pool_num']
+ min_ldn = nld
+ if selected_pool < 0:
+ msg = _('No available pools found.')
+ raise exception.VolumeBackendAPIException(data=msg)
+ return selected_pool
+
+ def _select_leastused_poolnumber(self, volume, pools,
+ xml, option=None):
+ """Pick up least used pool."""
+ size = volume['size'] * units.Gi
+ pools = [pool for (pn, pool) in six.iteritems(pools)
+ if pool['free'] >= size and
+ (len(self._properties['pool_pools']) == 0 or
+ pn in self._properties['pool_pools'])]
+ return self._return_poolnumber(pools)
+
+ def _select_migrate_poolnumber(self, volume, pools, xml, option):
+ """Pick up migration target pool."""
+ tmpPools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+ ldname = self._common.get_ldname(volume['id'],
+ self._properties['ld_name_format'])
+ ld = lds[ldname]
+ temp_conf_properties = self._common.get_conf_properties(option)
+
+ size = volume['size'] * units.Gi
+ pools = [pool for (pn, pool) in six.iteritems(pools)
+ if pool['free'] >= size and
+ (len(temp_conf_properties['pool_pools']) == 0 or
+ pn in temp_conf_properties['pool_pools'])]
+
+ selected_pool = self._return_poolnumber(pools)
+ if selected_pool == ld['pool_num']:
+            # No need to create a new volume; the LD is already in the
+            # selected pool.
+ selected_pool = -1
+ return selected_pool
+
+ def _select_dsv_poolnumber(self, volume, pools, option=None):
+ """Pick up backup pool for DSV."""
+ pools = [pool for (pn, pool) in six.iteritems(pools)
+ if pn in self._properties['pool_backup_pools']]
+ return self._return_poolnumber(pools)
+
+ def _select_ddr_poolnumber(self, volume, pools, xml, option):
+ """Pick up backup pool for DDR."""
+ size = option * units.Gi
+ pools = [pool for (pn, pool) in six.iteritems(pools)
+ if pool['free'] >= size and
+ (pn in self._properties['pool_backup_pools'])]
+ return self._return_poolnumber(pools)
+
+ def _select_volddr_poolnumber(self, volume, pools, xml, option):
+ """Pick up backup pool for DDR."""
+ size = option * units.Gi
+ pools = [pool for (pn, pool) in six.iteritems(pools)
+ if pool['free'] >= size and
+ (pn in self._properties['pool_pools'])]
+ return self._return_poolnumber(pools)
+
+ def _bind_ld(self, volume, capacity, validator,
+ nameselector, poolselector, option=None):
+ return self._sync_bind_ld(volume, capacity, validator,
+ nameselector, poolselector,
+ self._properties['diskarray_name'],
+ option)
+
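+    # _sync_bind_ld below is serialized per disk array: the decorator
+    # builds its lock name from the diskarray_name argument.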
+ @coordination.synchronized('mstorage_bind_execute_{diskarray_name}')
+ def _sync_bind_ld(self, volume, capacity, validator, nameselector,
+ poolselector, diskarray_name, option=None):
+ """Get storage state and bind ld.
+
+ volume: ld information
+ capacity: capacity in GB
+ validator: validate method(volume, xml)
+ nameselector: select ld name method(volume)
+ poolselector: select ld location method(volume, pools)
+ diskarray_name: target diskarray name
+ option: optional info
+ """
+ LOG.debug('_bind_ld Start.')
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ # execute validator function.
+ if validator is not None:
+ result = validator(volume, xml)
+ if result is False:
+ msg = _('Invalid bind Logical Disk info.')
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ # generate new ld name.
+ ldname = nameselector(volume)
+ # pick up least used pool and unused LDN.
+ selected_pool = poolselector(volume, pools, xml, option)
+ selected_ldn = self._select_ldnumber(used_ldns, max_ld_count)
+ if selected_pool < 0 or selected_ldn < 0:
+            LOG.debug('LD bind not necessary. '
+ 'Name=%(name)s '
+ 'Size=%(size)dGB '
+ 'LDN=%(ldn)04xh '
+ 'Pool=%(pool)04xh.',
+ {'name': ldname,
+ 'size': capacity,
+ 'ldn': selected_ldn,
+ 'pool': selected_pool})
+ return ldname, selected_ldn, selected_pool
+
+ # bind LD.
+ retnum, errnum = (self._cli.ldbind(ldname,
+ selected_pool,
+ selected_ldn,
+ capacity))
+ if retnum is False:
+ if 'iSM31077' in errnum:
+ msg = _('Logical Disk number is duplicated (%s).') % errnum
+ raise exception.VolumeBackendAPIException(data=msg)
+ else:
+ msg = _('Failed to bind Logical Disk (%s).') % errnum
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ LOG.debug('LD bound. Name=%(name)s Size=%(size)dGB '
+ 'LDN=%(ldn)04xh Pool=%(pool)04xh.',
+ {'name': ldname, 'size': capacity,
+ 'ldn': selected_ldn, 'pool': selected_pool})
+ return ldname, selected_ldn, selected_pool
+
+ def create_volume(self, volume):
+ msgparm = ('Volume ID = %(id)s, Size = %(size)dGB'
+ % {'id': volume['id'], 'size': volume['size']})
+ try:
+ self._create_volume(volume)
+ LOG.info(_LI('Created Volume (%s)'), msgparm)
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Create Volume (%(msgparm)s) '
+ '(%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _create_volume(self, volume):
+ LOG.debug('_create_volume Start.')
+
+ # select ld number and LD bind.
+ (ldname,
+ ldn,
+ selected_pool) = self._bind_ld(volume,
+ volume['size'],
+ None,
+ self._convert_id2name_in_migrate,
+ self._select_leastused_poolnumber)
+
+ # check io limit.
+ specs = self._common.get_volume_type_qos_specs(volume)
+ self._common.check_io_parameter(specs)
+ # set io limit.
+ self._cli.set_io_limit(ldname, specs)
+
+ LOG.debug('LD bound. '
+ 'Name=%(name)s '
+ 'Size=%(size)dGB '
+ 'LDN=%(ldn)04xh '
+ 'Pool=%(pool)04xh '
+ 'Specs=%(specs)s.',
+ {'name': ldname,
+ 'size': volume['size'],
+ 'ldn': ldn,
+ 'pool': selected_pool,
+ 'specs': specs})
+
+ def _can_extend_capacity(self, new_size, pools, lds, ld):
+ rvs = {}
+ ld_count_in_pool = {}
+ if ld['RPL Attribute'] == 'MV':
+ pair_lds = self._cli.get_pair_lds(ld['ldname'], lds)
+ for (ldn, pair_ld) in six.iteritems(pair_lds):
+ rv_name = pair_ld['ldname']
+ pool_number = pair_ld['pool_num']
+ ldn = pair_ld['ldn']
+ rvs[ldn] = pair_ld
+ # check rv status.
+ query_status = self._cli.query_MV_RV_status(rv_name[3:], 'RV')
+ if query_status != 'separated':
+ msg = (_('Specified Logical Disk %s has been copied.') %
+ rv_name)
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ # get pool number.
+ if pool_number in ld_count_in_pool:
+ ld_count_in_pool[pool_number].append(ldn)
+ else:
+ ld_count_in_pool[pool_number] = [ldn]
+
+ # check pool capacity.
+ for (pool_number, tmp_ldn_list) in six.iteritems(ld_count_in_pool):
+ ld_capacity = (
+ ld['ld_capacity'] * units.Gi)
+ new_size_byte = new_size * units.Gi
+ size_increase = new_size_byte - ld_capacity
+ pool = pools[pool_number]
+ ld_count = len(tmp_ldn_list)
+ if pool['free'] < size_increase * ld_count:
+ msg = (_('Not enough pool capacity. '
+ 'pool_number=%(pool)d, size_increase=%(sizeinc)d') %
+ {'pool': pool_number,
+ 'sizeinc': size_increase * ld_count})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ return rvs
+
+ def extend_volume(self, volume, new_size):
+ msgparm = ('Volume ID = %(id)s, New Size = %(newsize)dGB, '
+ 'Old Size = %(oldsize)dGB'
+ % {'id': volume['id'], 'newsize': new_size,
+ 'oldsize': volume['size']})
+ try:
+ self._extend_volume(volume, new_size)
+ LOG.info(_LI('Extended Volume (%s)'), msgparm)
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Extend Volume (%(msgparm)s) '
+ '(%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _extend_volume(self, volume, new_size):
+ LOG.debug('_extend_volume(Volume ID = %(id)s, '
+ 'new_size = %(size)s) Start.',
+ {'id': volume['id'], 'size': new_size})
+
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ ldname = self._common.get_ldname(volume['id'],
+ self._properties['ld_name_format'])
+
+ # get volume.
+ if ldname not in lds:
+            msg = (_('Logical Disk has already been unbound '
+ '(name=%(name)s, id=%(id)s).') %
+ {'name': ldname, 'id': volume['id']})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ ld = lds[ldname]
+ ldn = ld['ldn']
+
+        # check pool capacity.
+ rvs = self._can_extend_capacity(new_size, pools, lds, ld)
+
+ # volume expand.
+ self._cli.expand(ldn, new_size)
+
+ # rv expand.
+ if ld['RPL Attribute'] == 'MV':
+ # ld expand.
+ for (ldn, rv) in six.iteritems(rvs):
+ self._cli.expand(ldn, new_size)
+ elif ld['RPL Attribute'] != 'IV':
+ msg = (_('RPL Attribute Error. RPL Attribute = %s.')
+ % ld['RPL Attribute'])
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ LOG.debug('_extend_volume(Volume ID = %(id)s, '
+ 'new_size = %(newsize)s) End.',
+ {'id': volume['id'], 'newsize': new_size})
+
+ def create_cloned_volume(self, volume, src_vref):
+ msgparm = ('Volume ID = %(id)s, '
+ 'Source Volume ID = %(src_id)s'
+ % {'id': volume['id'],
+ 'src_id': src_vref['id']})
+ try:
+ self._create_cloned_volume(volume, src_vref)
+ LOG.info(_LI('Created Cloned Volume (%s)'), msgparm)
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Create Cloned Volume '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _create_cloned_volume(self, volume, src_vref):
+ """Creates a clone of the specified volume."""
+ LOG.debug('_create_cloned_volume'
+ '(Volume ID = %(id)s, Source ID = %(src_id)s ) Start.',
+ {'id': volume['id'], 'src_id': src_vref['id']})
+
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ # check MV existence and get MV info.
+ source_name = (
+ self._common.get_ldname(src_vref['id'],
+ self._properties['ld_name_format']))
+ if source_name not in lds:
+            msg = (_('Logical Disk `%(name)s` has already been unbound. '
+ 'volume_id = %(id)s.') %
+ {'name': source_name, 'id': src_vref['id']})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+ source_ld = lds[source_name]
+
+ # check temporarily released pairs existence.
+ if source_ld['RPL Attribute'] == 'MV':
+ # get pair lds.
+ pair_lds = self._cli.get_pair_lds(source_name, lds)
+ if len(pair_lds) == 3:
+                msg = (_('Cannot create clone volume. '
+                         'Number of pairs has reached 3. '
+                         'ldname=%s') % source_name)
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ # Creating Cloned Volume.
+ (volume_name,
+ ldn,
+ selected_pool) = self._bind_ld(volume,
+ src_vref['size'],
+ None,
+ self._convert_id2name,
+ self._select_leastused_poolnumber)
+
+ # check io limit.
+ specs = self._common.get_volume_type_qos_specs(volume)
+ self._common.check_io_parameter(specs)
+
+ # set io limit.
+ self._cli.set_io_limit(volume_name, specs)
+
+ LOG.debug('LD bound. Name=%(name)s '
+ 'Size=%(size)dGB '
+ 'LDN=%(ldn)04xh '
+ 'Pool=%(pool)04xh.',
+ {'name': volume_name,
+ 'size': volume['size'],
+ 'ldn': ldn,
+ 'pool': selected_pool})
+ LOG.debug('source_name=%(src_name)s, volume_name=%(name)s.',
+ {'src_name': source_name, 'name': volume_name})
+
+ # compare volume size and copy data to RV.
+ mv_capacity = src_vref['size']
+ rv_capacity = volume['size']
+ if rv_capacity <= mv_capacity:
+ rv_capacity = None
+
+ volume_properties = {
+ 'mvname': source_name,
+ 'rvname': volume_name,
+ 'capacity': mv_capacity,
+ 'mvid': src_vref['id'],
+ 'rvid': volume['id'],
+ 'rvldn': ldn,
+ 'rvcapacity': rv_capacity,
+ 'flag': 'clone',
+ 'context': self._context
+ }
+ self._cli.backup_restore(volume_properties, cli.UnpairWaitForClone)
+ LOG.debug('_create_cloned_volume(Volume ID = %(id)s, '
+ 'Source ID = %(src_id)s ) End.',
+ {'id': volume['id'], 'src_id': src_vref['id']})
+
+ def _validate_migrate_volume(self, volume, xml):
+ """Validate source volume information."""
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+ ldname = self._common.get_ldname(volume['id'],
+ self._properties['ld_name_format'])
+
+ # check volume status.
+ if volume['status'] != 'available':
+ msg = _('Specified Logical Disk %s is not available.') % ldname
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ # get ld object and check rpl attribute.
+ if ldname not in lds:
+ msg = _('Logical Disk `%s` does not exist.') % ldname
+ LOG.error(msg)
+ raise exception.NotFound(msg)
+ ld = lds[ldname]
+ if ld['Purpose'] != '---':
+ msg = (_('Specified Logical Disk %(ld)s '
+ 'has an invalid attribute (%(purpose)s).')
+ % {'ld': ldname, 'purpose': ld['Purpose']})
+ raise exception.VolumeBackendAPIException(data=msg)
+ return True
+
+ def migrate_volume(self, context, volume, host):
+ msgparm = ('Volume ID = %(id)s, '
+ 'Destination Host = %(dsthost)s'
+ % {'id': volume['id'],
+ 'dsthost': host})
+ try:
+ ret = self._migrate_volume(context, volume, host)
+ LOG.info(_LI('Migrated Volume (%s)'), msgparm)
+ return ret
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Migrate Volume '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _migrate_volume(self, context, volume, host):
+ """Migrate the volume to the specified host.
+
+ Returns a boolean indicating whether the migration occurred, as well as
+ model_update.
+ """
+ LOG.debug('_migrate_volume('
+ 'Volume ID = %(id)s, '
+ 'Volume Name = %(name)s, '
+ 'host = %(host)s) Start.',
+ {'id': volume['id'],
+ 'name': volume['name'],
+ 'host': host})
+
+ false_ret = (False, None)
+
+ if 'capabilities' not in host:
+            LOG.debug('No capabilities found in host. Host = %s.', host)
+ return false_ret
+
+ capabilities = host['capabilities']
+ if capabilities.get('vendor_name') != self._properties['vendor_name']:
+ LOG.debug('Vendor is not %(vendor)s. '
+ 'capabilities = %(capabilities)s ',
+ {'vendor': self._properties['vendor_name'],
+ 'capabilities': capabilities})
+ return false_ret
+
+ # get another host group configurations.
+ temp_conf = self._common.get_conf(host)
+ temp_conf_properties = self._common.get_conf_properties(temp_conf)
+
+ # another storage configuration is not supported.
+ if temp_conf_properties['cli_fip'] != self._properties['cli_fip']:
+ LOG.debug('FIP is mismatch. FIP = %(tempfip)s != %(fip)s',
+ {'tempfip': temp_conf_properties['cli_fip'],
+ 'fip': self._properties['cli_fip']})
+ return false_ret
+
+ # bind LD.
+ (rvname,
+ ldn,
+ selected_pool) = self._bind_ld(volume,
+ volume['size'],
+ self._validate_migrate_volume,
+ self._convert_id2migratename,
+ self._select_migrate_poolnumber,
+ temp_conf)
+
+ if selected_pool >= 0:
+ # check io limit.
+ specs = self._common.get_volume_type_qos_specs(volume)
+ self._common.check_io_parameter(specs)
+
+ # set io limit.
+ self._cli.set_io_limit(rvname, specs)
+
+ volume_properties = {
+ 'mvname':
+ self._common.get_ldname(
+ volume['id'], self._properties['ld_name_format']),
+ 'rvname': rvname,
+ 'capacity':
+ volume['size'] * units.Gi,
+ 'mvid': volume['id'],
+ 'rvid': None,
+ 'flag': 'migrate',
+ 'context': self._context
+ }
+ # replicate LD.
+ self._cli.backup_restore(volume_properties,
+ cli.UnpairWaitForMigrate)
+
+ LOG.debug('_migrate_volume(Volume ID = %(id)s, '
+ 'Host = %(host)s) End.',
+ {'id': volume['id'], 'host': host})
+
+ return (True, [])
+
+ def check_for_export(self, context, volume_id):
+ pass
+
+ def iscsi_do_export(self, _ctx, volume, connector, ensure=False):
+ msgparm = ('Volume ID = %(id)s, '
+ 'Initiator Name = %(initiator)s'
+ % {'id': volume['id'],
+ 'initiator': connector['initiator']})
+ try:
+ ret = self._iscsi_do_export(_ctx, volume, connector, ensure)
+ LOG.info(_LI('Created iSCSI Export (%s)'), msgparm)
+ return ret
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Create iSCSI Export '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _iscsi_do_export(self, _ctx, volume, connector, ensure):
+ while True:
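+            # After adding the LD to an LD Set, loop to re-read the
+            # configuration and confirm the assignment before building
+            # the connection information.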
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ # find LD Set.
+
+ # get target LD Set name.
+ metadata = {}
+ # image to volume or volume to image.
+ if (volume['status'] in ['downloading', 'uploading'] and
+ self._properties['ldset_controller_node_name'] != ''):
+ metadata['ldset'] = (
+ self._properties['ldset_controller_node_name'])
+ LOG.debug('image to volume or volume to image:%s',
+ volume['status'])
+ # migrate.
+ elif (volume['migration_status'] is not None and
+ self._properties['ldset_controller_node_name'] != ''):
+ metadata['ldset'] = (
+ self._properties['ldset_controller_node_name'])
+ LOG.debug('migrate:%s', volume['migration_status'])
+
+ ldset = self._common.get_ldset(ldsets, metadata)
+ if ldset is None:
+ for tldset in six.itervalues(ldsets):
+                    if ('initiator_list' in tldset and
+                            tldset['initiator_list'].count(
+                                connector['initiator']) > 0):
+ ldset = tldset
+ LOG.debug('ldset=%s.', ldset)
+ break
+ if ldset is None:
+ msg = _('Appropriate Logical Disk Set could not be found.')
+ raise exception.NotFound(msg)
+
+ if len(ldset['portal_list']) < 1:
+ msg = (_('Logical Disk Set `%s` has no portal.') %
+ ldset['ldsetname'])
+ raise exception.NotFound(msg)
+
+ LOG.debug('migration_status:%s', volume['migration_status'])
+ migstat = volume['migration_status']
+ if migstat is not None and 'target:' in migstat:
+ index = migstat.find('target:')
+ if index != -1:
+ migstat = migstat[len('target:'):]
+ ldname = (
+ self._common.get_ldname(
+ migstat, self._properties['ld_name_format']))
+ else:
+ ldname = (
+ self._common.get_ldname(
+ volume['id'], self._properties['ld_name_format']))
+
+ # add LD to LD set.
+ if ldname not in lds:
+ msg = _('Logical Disk `%s` could not be found.') % ldname
+ raise exception.NotFound(msg)
+ ld = lds[ldname]
+
+ if ld['ldn'] not in ldset['lds']:
+                # Check whether the LD remains on the ldset_controller_node.
+ ldset_controller_node_name = (
+ self._properties['ldset_controller_node_name'])
+ if ldset_controller_node_name != '':
+ if ldset_controller_node_name != ldset['ldsetname']:
+ ldset_controller = ldsets[ldset_controller_node_name]
+ if ld['ldn'] in ldset_controller['lds']:
+ LOG.debug(
+                            'Deleting the LD remaining on '
+                            'ldset_controller_node. '
+                            'LD Set Name=%s.',
+ ldset_controller_node_name)
+ self._cli.delldsetld(ldset_controller_node_name,
+ ldname)
+ # assign the LD to LD Set.
+ self._cli.addldsetld(ldset['ldsetname'], ldname)
+
+ LOG.debug('Add LD `%(ld)s` to LD Set `%(ldset)s`.',
+ {'ld': ldname, 'ldset': ldset['ldsetname']})
+ else:
+ break
+
+ # enumerate portals for iscsi multipath.
+ prefered_director = ld['pool_num'] % 2
+ nominated = []
+ for director in [prefered_director, 1 - prefered_director]:
+ if director not in hostports:
+ continue
+ dirportal = []
+ for port in hostports[director]:
+ if not port['protocol'] == 'iSCSI':
+ continue
+ for portal in ldset['portal_list']:
+ if portal.startswith(port['ip'] + ':'):
+ dirportal.append(portal)
+ break
+ if ((self._properties['portal_number'] > 0) and
+ (len(dirportal) > self._properties['portal_number'])):
+ nominated.extend(random.sample(
+ dirportal, self._properties['portal_number']))
+ else:
+ nominated.extend(dirportal)
+
+ if len(nominated) == 0:
+ raise exception.NotFound(
+                    _('No portal matches any host port.'))
+
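+            # e.g. (illustrative) provider_location:
+            # '192.168.1.90:3260;192.168.1.91:3260,1 iqn.2001-03.target 0'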
+ location = ('%(list)s,1 %(iqn)s %(lun)d'
+ % {'list': ';'.join(nominated),
+ 'iqn': ldset['lds'][ld['ldn']]['iqn'],
+ 'lun': ldset['lds'][ld['ldn']]['lun']})
+
+ LOG.debug('%(ensure)sexport LD `%(name)s` via `%(location)s`.',
+ {'ensure': 'ensure_' if ensure else '',
+ 'name': ldname,
+ 'location': location})
+ return {'provider_location': location}
+
+ def fc_do_export(self, _ctx, volume, connector, ensure=False):
+ msgparm = ('Volume ID = %(id)s, '
+ 'Initiator WWPNs = %(wwpns)s'
+ % {'id': volume['id'],
+ 'wwpns': connector['wwpns']})
+ try:
+ ret = self._fc_do_export(_ctx, volume, connector, ensure)
+ LOG.info(_LI('Created FC Export (%s)'), msgparm)
+ return ret
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Create FC Export '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _fc_do_export(self, _ctx, volume, connector, ensure):
+ while True:
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ # find LD Set.
+
+ # get target LD Set.
+ metadata = {}
+ # image to volume or volume to image.
+ if (volume['status'] in ['downloading', 'uploading'] and
+ self._properties['ldset_controller_node_name'] != ''):
+ metadata['ldset'] = (
+ self._properties['ldset_controller_node_name'])
+ LOG.debug('image to volume or volume to image:%s',
+ volume['status'])
+ # migrate.
+ elif (volume['migration_status'] is not None and
+ self._properties['ldset_controller_node_name'] != ''
+ ):
+ metadata['ldset'] = (
+ self._properties['ldset_controller_node_name'])
+ LOG.debug('migrate:%s', volume['migration_status'])
+
+ ldset = self._common.get_ldset(ldsets, metadata)
+ if ldset is None:
+ for wwpn in connector['wwpns']:
+ length = len(wwpn)
+ findwwpn = '-'.join([wwpn[i:i + 4]
+ for i in range(0, length, 4)])
+ findwwpn = findwwpn.upper()
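+ # e.g. a connector WWPN '10000090fa000001' becomes
+ # '1000-0090-FA00-0001', the hyphenated upper-case notation
+ # used by the LD Set (hypothetical WWPN, for illustration).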
+ for tldset in six.itervalues(ldsets):
+ if 'wwpn' in tldset and findwwpn in tldset['wwpn']:
+ ldset = tldset
+ LOG.debug('ldset=%s.', ldset)
+ break
+ if ldset is not None:
+ break
+ if ldset is None:
+ msg = _('Logical Disk Set could not be found.')
+ raise exception.NotFound(msg)
+
+ # get free lun.
+ luns = []
+ ldsetlds = ldset['lds']
+ for ld in six.itervalues(ldsetlds):
+ luns.append(ld['lun'])
+
+ target_lun = 0
+ for lun in sorted(luns):
+ if target_lun < lun:
+ break
+ target_lun += 1
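+ # Example: if the LD Set already uses LUNs [0, 1, 3], the scan
+ # above stops at the first gap and selects target_lun = 2.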
+
+ LOG.debug('migration_status:%s', volume['migration_status'])
+ migstat = volume['migration_status']
+ if migstat is not None and 'target:' in migstat:
+ index = migstat.find('target:')
+ if index != -1:
+ migstat = migstat[index + len('target:'):]
+ ldname = (
+ self._common.get_ldname(
+ migstat,
+ self._properties['ld_name_format']))
+ else:
+ ldname = (
+ self._common.get_ldname(
+ volume['id'],
+ self._properties['ld_name_format']))
+
+ # add LD to LD set.
+ if ldname not in lds:
+ msg = _('Logical Disk `%s` could not be found.') % ldname
+ raise exception.NotFound(msg)
+ ld = lds[ldname]
+
+ if ld['ldn'] not in ldset['lds']:
+ # Check the LD is remaining on ldset_controller_node.
+ ldset_controller_node_name = (
+ self._properties['ldset_controller_node_name'])
+ if ldset_controller_node_name != '':
+ if ldset_controller_node_name != ldset['ldsetname']:
+ ldset_controller = ldsets[ldset_controller_node_name]
+ if ld['ldn'] in ldset_controller['lds']:
+ LOG.debug(
+ 'Delete the LD remaining on '
+ 'ldset_controller_node. '
+ 'Ldset Name=%s.',
+ ldset_controller_node_name)
+ self._cli.delldsetld(ldset_controller_node_name,
+ ldname)
+ # assign the LD to LD Set.
+ self._cli.addldsetld(ldset['ldsetname'], ldname, target_lun)
+
+ LOG.debug('Add LD `%(ld)s` to LD Set `%(ldset)s`.',
+ {'ld': ldname, 'ldset': ldset['ldsetname']})
+ else:
+ break
+
+ LOG.debug('%(ensure)sexport LD `%(ld)s`.',
+ {'ensure': 'ensure_' if ensure else '',
+ 'ld': ldname})
+
+ def remove_export(self, context, volume):
+ msgparm = 'Volume ID = %s' % volume['id']
+ try:
+ self._remove_export(context, volume)
+ LOG.info(_LI('Removed Export (%s)'), msgparm)
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Remove Export '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _remove_export(self, context, volume):
+ if (volume['status'] == 'uploading' and
+ volume['attach_status'] == 'attached'):
+ return
+ else:
+ LOG.debug('_remove_export Start.')
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ # get target LD Set.
+ metadata = {}
+ # image to volume or volume to image.
+ if (volume['status'] in ['downloading', 'uploading'] and
+ self._properties['ldset_controller_node_name'] != ''):
+ metadata['ldset'] = (
+ self._properties['ldset_controller_node_name'])
+ LOG.debug('image to volume or volume to image:%s',
+ volume['status'])
+ # migrate.
+ elif (volume['migration_status'] is not None and
+ self._properties['ldset_controller_node_name'] != ''):
+ metadata['ldset'] = (
+ self._properties['ldset_controller_node_name'])
+ LOG.debug('migrate:%s', volume['migration_status'])
+
+ ldset = self._common.get_ldset(ldsets, metadata)
+
+ LOG.debug('migration_status:%s', volume['migration_status'])
+ migstat = volume['migration_status']
+ if migstat is not None and 'target:' in migstat:
+ index = migstat.find('target:')
+ if index != -1:
+ migstat = migstat[index + len('target:'):]
+ ldname = (
+ self._common.get_ldname(
+ migstat,
+ self._properties['ld_name_format']))
+ else:
+ ldname = (
+ self._common.get_ldname(
+ volume['id'],
+ self._properties['ld_name_format']))
+
+ if ldname not in lds:
+ LOG.debug('LD `%s` already unbound?', ldname)
+ return
+
+ ld = lds[ldname]
+ ldsetlist = []
+
+ if ldset is None:
+ for tldset in six.itervalues(ldsets):
+ if ld['ldn'] in tldset['lds']:
+ ldsetlist.append(tldset)
+ LOG.debug('ldset=%s.', tldset)
+ if len(ldsetlist) == 0:
+ LOG.debug('LD `%s` already deleted from LD Set?',
+ ldname)
+ return
+ else:
+ if ld['ldn'] not in ldset['lds']:
+ LOG.debug('LD `%(ld)s` already deleted '
+ 'from LD Set `%(ldset)s`?',
+ {'ld': ldname, 'ldset': ldset['ldsetname']})
+ return
+ ldsetlist.append(ldset)
+
+ # delete LD from LD set.
+ for targetldset in ldsetlist:
+ retnum, errnum = (self._cli.delldsetld(
+ targetldset['ldsetname'], ldname))
+
+ if retnum is not True:
+ if 'iSM31065' in errnum:
+ LOG.debug(
+ 'LD `%(ld)s` already deleted '
+ 'from LD Set `%(ldset)s`?',
+ {'ld': ldname, 'ldset': targetldset['ldsetname']})
+ else:
+ msg = (_('Failed to unregister Logical Disk from '
+ 'Logical Disk Set (%s)') % errnum)
+ raise exception.VolumeBackendAPIException(data=msg)
+ LOG.debug('LD `%(ld)s` deleted from LD Set `%(ldset)s`.',
+ {'ld': ldname, 'ldset': targetldset['ldsetname']})
+
+ LOG.debug('_remove_export(Volume ID = %s) End.', volume['id'])
+
+ def iscsi_initialize_connection(self, volume, connector):
+ msgparm = ('Volume ID = %(id)s, Connector = %(connector)s'
+ % {'id': volume['id'], 'connector': connector})
+
+ try:
+ ret = self._iscsi_initialize_connection(volume, connector)
+ LOG.info(_LI('Initialized iSCSI Connection (%s)'), msgparm)
+ return ret
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Initialize iSCSI Connection '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _iscsi_initialize_connection(self, volume, connector):
+ """Initializes the connection and returns connection info.
+
+ The iscsi driver returns a driver_volume_type of 'iscsi'.
+ The format of the driver data is defined in _get_iscsi_properties.
+ Example return value::
+
+ {
+ 'driver_volume_type': 'iscsi',
+ 'data': {
+ 'target_discovered': True,
+ 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
+ 'target_portal': '127.0.0.1:3260',
+ 'volume_id': 1,
+ 'access_mode': 'rw'
+ }
+ }
+
+ """
+ LOG.debug('_iscsi_initialize_connection'
+ '(Volume ID = %(id)s, connector = %(connector)s) Start.',
+ {'id': volume['id'], 'connector': connector})
+
+ provider_location = volume['provider_location']
+ provider_location = provider_location.split()
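+ # provider_location is stored as '<portals>,1 <iqn> <lun>', so
+ # after split(): [0] holds '<portal>[;<portal>...],1', [1] the
+ # target IQN and [2] the LUN. The [0][0:-2] below strips the
+ # trailing ',1' before splitting the portal list on ';'.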
+ info = {'driver_volume_type': 'iscsi',
+ 'data': {'target_portal': random.choice(
+ provider_location[0][0:-2].split(";")),
+ 'target_iqn': provider_location[1],
+ 'target_lun': int(provider_location[2]),
+ 'target_discovered': False,
+ 'volume_id': volume['id']}
+ }
+ if connector.get('multipath'):
+ portals_len = len(provider_location[0][0:-2].split(";"))
+ info['data'].update({'target_portals':
+ provider_location[0][0:-2].split(";"),
+ 'target_iqns': [provider_location[1]] *
+ portals_len,
+ 'target_luns': [int(provider_location[2])] *
+ portals_len})
+ LOG.debug('_iscsi_initialize_connection'
+ '(Volume ID = %(id)s, connector = %(connector)s, '
+ 'info = %(info)s) End.',
+ {'id': volume['id'],
+ 'connector': connector,
+ 'info': info})
+ return info
+
+ def iscsi_terminate_connection(self, volume, connector):
+ msgparm = ('Volume ID = %(id)s, Connector = %(connector)s'
+ % {'id': volume['id'], 'connector': connector})
+
+ try:
+ ret = self._iscsi_terminate_connection(volume, connector)
+ LOG.info(_LI('Terminated iSCSI Connection (%s)'), msgparm)
+ return ret
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Terminate iSCSI Connection '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _iscsi_terminate_connection(self, volume, connector):
+ LOG.debug('execute _iscsi_terminate_connection'
+ '(Volume ID = %(id)s, connector = %(connector)s).',
+ {'id': volume['id'], 'connector': connector})
+
+ def fc_initialize_connection(self, volume, connector):
+ msgparm = ('Volume ID = %(id)s, Connector = %(connector)s'
+ % {'id': volume['id'], 'connector': connector})
+
+ try:
+ ret = self._fc_initialize_connection(volume, connector)
+ LOG.info(_LI('Initialized FC Connection (%s)'), msgparm)
+ return ret
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Initialize FC Connection '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _fc_initialize_connection(self, volume, connector):
+ """Initializes the connection and returns connection info.
+
+ The driver returns a driver_volume_type of 'fibre_channel'.
+ The target_wwn can be a single entry or a list of wwns that
+ correspond to the list of remote wwn(s) that will export the volume.
+ Example return values::
+
+ {
+ 'driver_volume_type': 'fibre_channel',
+ 'data': {
+ 'target_discovered': True,
+ 'target_lun': 1,
+ 'target_wwn': '1234567890123',
+ 'access_mode': 'rw'
+ }
+ }
+
+ or
+
+ {
+ 'driver_volume_type': 'fibre_channel',
+ 'data': {
+ 'target_discovered': True,
+ 'target_lun': 1,
+ 'target_wwn': ['1234567890123', '0987654321321'],
+ 'access_mode': 'rw'
+ }
+ }
+ """
+
+ LOG.debug('_fc_initialize_connection'
+ '(Volume ID = %(id)s, connector = %(connector)s) Start.',
+ {'id': volume['id'], 'connector': connector})
+
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ # get target wwpns and initiator/target map.
+
+ fc_ports = []
+ for director, hostport in six.iteritems(hostports):
+ for port in hostport:
+ if port['protocol'] == 'FC':
+ fc_ports.append(port)
+ target_wwns, init_targ_map = (
+ self._build_initiator_target_map(connector, fc_ports))
+
+ LOG.debug('migration_status:%s', volume['migration_status'])
+ migstat = volume['migration_status']
+ if migstat is not None and 'target:' in migstat:
+ index = migstat.find('target:')
+ if index != -1:
+ migstat = migstat[len('target:'):]
+ ldname = (
+ self._common.get_ldname(migstat,
+ self._properties['ld_name_format']))
+ else:
+ ldname = (
+ self._common.get_ldname(volume['id'],
+ self._properties['ld_name_format']))
+
+ # get lun.
+ if ldname not in lds:
+ msg = (_('Logical Disk %(ld)s is already unbound. '
+ 'volume_id = %(id)s.') %
+ {'ld': ldname, 'id': volume['id']})
+ LOG.error(msg)
+ raise exception.NotFound(msg)
+ ldn = lds[ldname]['ldn']
+
+ lun = None
+ for ldset in six.itervalues(ldsets):
+ if ldn in ldset['lds']:
+ lun = ldset['lds'][ldn]['lun']
+ break
+
+ info = {
+ 'driver_volume_type': 'fibre_channel',
+ 'data': {'target_lun': lun,
+ 'target_wwn': target_wwns,
+ 'initiator_target_map': init_targ_map}}
+
+ LOG.debug('_fc_initialize_connection'
+ '(Volume ID = %(id)s, connector = %(connector)s, '
+ 'info = %(info)s) End.',
+ {'id': volume['id'],
+ 'connector': connector,
+ 'info': info})
+ return info
+
+ def fc_terminate_connection(self, volume, connector):
+ msgparm = ('Volume ID = %(id)s, Connector = %(connector)s'
+ % {'id': volume['id'], 'connector': connector})
+
+ try:
+ ret = self._fc_terminate_connection(volume, connector)
+ LOG.info(_LI('Terminated FC Connection (%s)'), msgparm)
+ return ret
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Terminate FC Connection '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _fc_terminate_connection(self, volume, connector):
+ """Disallow connection from connector."""
+ LOG.debug('_fc_terminate_connection'
+ '(Volume ID = %(id)s, connector = %(connector)s) Start.',
+ {'id': volume['id'], 'connector': connector})
+
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ # get target wwpns and initiator/target map.
+ fc_ports = []
+ for director, hostport in six.iteritems(hostports):
+ for port in hostport:
+ if port['protocol'] == 'FC':
+ fc_ports.append(port)
+ target_wwns, init_targ_map = (
+ self._build_initiator_target_map(connector, fc_ports))
+
+ info = {'driver_volume_type': 'fibre_channel',
+ 'data': {'target_wwn': target_wwns,
+ 'initiator_target_map': init_targ_map}}
+ LOG.debug('_fc_terminate_connection'
+ '(Volume ID = %(id)s, connector = %(connector)s, '
+ 'info = %(info)s) End.',
+ {'id': volume['id'],
+ 'connector': connector,
+ 'info': info})
+ return info
+
+ def _build_initiator_target_map(self, connector, fc_ports):
+ target_wwns = []
+ for port in fc_ports:
+ target_wwns.append(port['wwpn'])
+
+ initiator_wwns = connector['wwpns']
+
+ init_targ_map = {}
+ for initiator in initiator_wwns:
+ init_targ_map[initiator] = target_wwns
+
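+ # Resulting shape (hypothetical WWPNs for illustration): every
+ # initiator maps to all FC target ports, e.g.
+ # {'10000090fa000001': ['2100000991020012', '2200000991020012'],
+ #  '10000090fa000002': ['2100000991020012', '2200000991020012']}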
+ return target_wwns, init_targ_map
+
+ def _update_volume_status(self):
+ """Retrieve status info from volume group."""
+
+ data = {}
+
+ data['volume_backend_name'] = (self._properties['backend_name'] or
+ self._driver_name)
+ data['vendor_name'] = self._properties['vendor_name']
+ data['driver_version'] = self.VERSION
+ data['reserved_percentage'] = self._properties['reserved_percentage']
+ data['QoS_support'] = True
+
+ # Get xml data from file and parse.
+ try:
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.parse_xml())
+
+ # Get capacities from pools.
+ pool_capacity = self._common.get_pool_capacity(pools, ldsets)
+
+ data['total_capacity_gb'] = pool_capacity['total_capacity_gb']
+ data['free_capacity_gb'] = pool_capacity['free_capacity_gb']
+ except Exception:
+ LOG.debug('_update_volume_status Unexpected error. '
+ 'exception=%s',
+ traceback.format_exc())
+ data['total_capacity_gb'] = 0
+ data['free_capacity_gb'] = 0
+ return data
+
+ def iscsi_get_volume_stats(self, refresh=False):
+ """Get volume status.
+
+ If 'refresh' is True, update the stats first.
+ """
+ if refresh:
+ self._stats = self._update_volume_status()
+ self._stats['storage_protocol'] = 'iSCSI'
+ LOG.debug('data=%(data)s, config_group=%(group)s',
+ {'data': self._stats,
+ 'group': self._properties['config_group']})
+
+ return self._stats
+
+ def fc_get_volume_stats(self, refresh=False):
+ """Get volume status.
+
+ If 'refresh' is True, update the stats first.
+ """
+
+ if refresh:
+ self._stats = self._update_volume_status()
+ self._stats['storage_protocol'] = 'FC'
+ LOG.debug('data=%(data)s, config_group=%(group)s',
+ {'data': self._stats,
+ 'group': self._properties['config_group']})
+
+ return self._stats
+
+ def get_pool(self, volume):
+ LOG.debug('backend_name=%s', self._properties['backend_name'])
+ return self._properties['backend_name']
+
+ def delete_volume(self, volume):
+ msgparm = 'Volume ID = %s' % volume['id']
+ try:
+ self._delete_volume(volume)
+ LOG.info(_LI('Deleted Volume (%s)'), msgparm)
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Delete Volume '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _delete_volume(self, volume):
+ LOG.debug('_delete_volume Start.')
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ ldname = (
+ self._common.get_ldname(volume['id'],
+ self._properties['ld_name_format']))
+ if ldname not in lds:
+ LOG.debug('LD `%s` already unbound?', ldname)
+ return
+
+ ld = lds[ldname]
+
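+ # The RPL Attribute reflects the LD's replication role: 'IV'
+ # (no pair), 'MV' (master of a pair) or 'RV' (replica of a pair).
+ # Paired LDs must be in 'separated' state before they can be
+ # unpaired; ldname[3:] strips the three-character type prefix
+ # (e.g. 'LX:') expected by the iSM CLI.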
+ if ld['RPL Attribute'] == 'IV':
+ pass
+
+ elif ld['RPL Attribute'] == 'MV':
+ query_status = self._cli.query_MV_RV_status(ldname[3:], 'MV')
+ if query_status == 'separated':
+ # unpair.
+ rvname = self._cli.query_MV_RV_name(ldname[3:], 'MV')
+ self._cli.unpair(ldname[3:], rvname, 'force')
+ else:
+ msg = _('Specified Logical Disk %s is not separated.') % ldname
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ elif ld['RPL Attribute'] == 'RV':
+ query_status = self._cli.query_MV_RV_status(ldname[3:], 'RV')
+ if query_status == 'separated':
+ # unpair.
+ mvname = self._cli.query_MV_RV_name(ldname[3:], 'RV')
+ self._cli.unpair(mvname, ldname[3:], 'force')
+ else:
+ msg = _('Specified Logical Disk %s is not separated.') % ldname
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ else:
+ msg = (_('RPL Attribute Error. RPL Attribute = %s.')
+ % ld['RPL Attribute'])
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ # Check the LD is remaining on ldset_controller_node.
+ ldset_controller_node_name = (
+ self._properties['ldset_controller_node_name'])
+ if ldset_controller_node_name != '':
+ if ldset_controller_node_name in ldsets:
+ ldset = ldsets[ldset_controller_node_name]
+ if ld['ldn'] in ldset['lds']:
+ LOG.debug('delete LD from ldset_controller_node. '
+ 'Ldset Name=%s.',
+ ldset_controller_node_name)
+ self._cli.delldsetld(ldset_controller_node_name, ldname)
+
+ # unbind LD.
+ self._cli.unbind(ldname)
+ LOG.debug('LD unbound. Name=%s.', ldname)
+
+ def create_snapshot(self, snapshot):
+ msgparm = ('Snapshot ID = %(id)s, Snapshot Volume ID = %(vol_id)s'
+ % {'id': snapshot['id'], 'vol_id': snapshot['volume_id']})
+ try:
+ self._create_snapshot(snapshot)
+ LOG.info(_LI('Created Snapshot (%s)'), msgparm)
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Create Snapshot '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _create_snapshot(self, snapshot):
+ LOG.debug('_create_snapshot'
+ '(Volume ID = %(id)s, Snapshot ID = %(snap_id)s ) Start.',
+ {'id': snapshot['volume_id'], 'snap_id': snapshot['id']})
+
+ if len(self._properties['pool_backup_pools']) == 0:
+ LOG.error(_LE('backup_pools is not set.'))
+ raise exception.ParameterNotFound(param='backup_pools')
+
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ # get MV name.
+ ldname = self._common.get_ldname(snapshot['volume_id'],
+ self._properties['ld_name_format'])
+
+ ld = self._cli.check_ld_existed_rplstatus(lds,
+ ldname,
+ snapshot,
+ 'backup')
+ ld_capacity = ld['ld_capacity']
+
+ (snapshotname,
+ selected_ldn,
+ selected_pool) = self._bind_ld(snapshot,
+ ld_capacity,
+ None,
+ self._convert_id2snapname,
+ self._select_ddr_poolnumber,
+ ld_capacity)
+
+ volume_properties = {
+ 'mvname': ldname,
+ 'rvname': snapshotname,
+ 'capacity': ld['ld_capacity'],
+ 'mvid': snapshot['volume_id'],
+ 'rvid': snapshot['id'],
+ 'flag': 'backup',
+ 'context': self._context
+ }
+ self._cli.backup_restore(volume_properties, cli.UnpairWaitForBackup)
+
+ LOG.debug('_create_snapshot'
+ '(Volume ID = %(vol_id)s, Snapshot ID = %(snap_id)s) End.',
+ {'vol_id': snapshot['volume_id'],
+ 'snap_id': snapshot['id']})
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ msgparm = ('Volume ID = %(id)s, '
+ 'Snapshot ID = %(snap_id)s, '
+ 'Snapshot Volume ID = %(snapvol_id)s'
+ % {'id': volume['id'],
+ 'snap_id': snapshot['id'],
+ 'snapvol_id': snapshot['volume_id']})
+ try:
+ self._create_volume_from_snapshot(volume, snapshot)
+ LOG.info(_LI('Created Volume from Snapshot (%s)'), msgparm)
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Create Volume from Snapshot '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _create_volume_from_snapshot(self, volume, snapshot):
+ LOG.debug('_create_volume_from_snapshot'
+ '(Volume ID = %(vol)s, Snapshot ID = %(snap)s) Start.',
+ {'vol': volume['id'], 'snap': snapshot['id']})
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ # get MV name.
+ mvname = (
+ self._common.get_ldname(snapshot['id'],
+ self._properties['ld_backupname_format']))
+ ld = self._cli.check_ld_existed_rplstatus(lds,
+ mvname,
+ snapshot,
+ 'restore')
+ mv_capacity = ld['ld_capacity']
+ rv_capacity = volume['size']
+
+ (rvname,
+ rvnumber,
+ selected_pool) = self._bind_ld(volume,
+ mv_capacity,
+ None,
+ self._convert_id2name,
+ self._select_volddr_poolnumber,
+ mv_capacity)
+
+ # check io limit.
+ specs = self._common.get_volume_type_qos_specs(volume)
+ self._common.check_io_parameter(specs)
+
+ # set io limit.
+ self._cli.set_io_limit(rvname, specs)
+
+ if rv_capacity <= mv_capacity:
+ rvnumber = None
+ rv_capacity = None
+
+ # Restore Start.
+ volume_properties = {
+ 'mvname': mvname,
+ 'rvname': rvname,
+ 'capacity': mv_capacity,
+ 'mvid': snapshot['id'],
+ 'rvid': volume['id'],
+ 'rvldn': rvnumber,
+ 'rvcapacity': rv_capacity,
+ 'flag': 'restore',
+ 'context': self._context
+ }
+ self._cli.backup_restore(volume_properties, cli.UnpairWaitForRestore)
+
+ LOG.debug('_create_volume_from_snapshot'
+ '(Volume ID = %(vol)s, Snapshot ID = %(snap)s, '
+ 'Specs=%(specs)s) End.',
+ {'vol': volume['id'],
+ 'snap': snapshot['id'],
+ 'specs': specs})
+
+ def delete_snapshot(self, snapshot):
+ msgparm = ('Snapshot ID = %(id)s, '
+ 'Snapshot Volume ID = %(vol_id)s'
+ % {'id': snapshot['id'],
+ 'vol_id': snapshot['volume_id']})
+ try:
+ self._delete_snapshot(snapshot)
+ LOG.info(_LI('Deleted Snapshot (%s)'), msgparm)
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Delete Snapshot '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _delete_snapshot(self, snapshot):
+ LOG.debug('_delete_snapshot(Snapshot ID = %s) Start.', snapshot['id'])
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+ # get ld name.
+ ldname = (
+ self._common.get_ldname(snapshot['id'],
+ self._properties['ld_backupname_format']))
+ ld = self._cli.check_ld_existed_rplstatus(lds,
+ ldname,
+ snapshot,
+ 'delete')
+ if ld is None:
+ return
+
+ self._cli.unbind(ldname)
+
+ LOG.debug('_delete_snapshot(Snapshot ID = %s) End.', snapshot['id'])
+
+
+class MStorageDSVDriver(MStorageDriver):
+ """M-Series Storage Snapshot helper class."""
+
+ def create_snapshot(self, snapshot):
+ msgparm = ('Snapshot ID = %(snap_id)s, '
+ 'Snapshot Volume ID = %(snapvol_id)s'
+ % {'snap_id': snapshot['id'],
+ 'snapvol_id': snapshot['volume_id']})
+ try:
+ self._create_snapshot(snapshot,
+ self._properties['diskarray_name'])
+ LOG.info(_LI('Created Snapshot (%s)'), msgparm)
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Create Snapshot '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ @coordination.synchronized('mstorage_bind_execute_{diskarray_name}')
+ def _create_snapshot(self, snapshot, diskarray_name):
+ LOG.debug('_create_snapshot(Volume ID = %(snapvol_id)s, '
+ 'Snapshot ID = %(snap_id)s ) Start.',
+ {'snapvol_id': snapshot['volume_id'],
+ 'snap_id': snapshot['id']})
+
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ if len(self._properties['pool_backup_pools']) == 0:
+ LOG.error(_LE('backup_pools is not set.'))
+ raise exception.ParameterNotFound(param='backup_pools')
+
+ # get BV name.
+ ldname = self._common.get_ldname(snapshot['volume_id'],
+ self._properties['ld_name_format'])
+ if ldname not in lds:
+ msg = _('Logical Disk `%s` is already unbound.') % ldname
+ LOG.error(msg)
+ raise exception.NotFound(msg)
+
+ selected_pool = self._select_dsv_poolnumber(snapshot, pools, None)
+ snapshotname = self._convert_id2snapname(snapshot)
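+ # snapshot_create takes the SV name without its type prefix;
+ # snapshotname[3:] drops the leading three characters (e.g. 'LX:').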
+ self._cli.snapshot_create(ldname, snapshotname[3:], selected_pool)
+
+ LOG.debug('_create_snapshot(Volume ID = %(snapvol_id)s, '
+ 'Snapshot ID = %(snap_id)s) End.',
+ {'snapvol_id': snapshot['volume_id'],
+ 'snap_id': snapshot['id']})
+
+ def delete_snapshot(self, snapshot):
+ msgparm = ('Snapshot ID = %(snap_id)s, '
+ 'Snapshot Volume ID = %(snapvol_id)s'
+ % {'snap_id': snapshot['id'],
+ 'snapvol_id': snapshot['volume_id']})
+ try:
+ self._delete_snapshot(snapshot)
+ LOG.info(_LI('Deleted Snapshot (%s)'), msgparm)
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Delete Snapshot '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _delete_snapshot(self, snapshot):
+ LOG.debug('_delete_snapshot(Snapshot ID = %s) Start.',
+ snapshot['id'])
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ # get BV name.
+ ldname = self._common.get_ldname(snapshot['volume_id'],
+ self._properties['ld_name_format'])
+ if ldname not in lds:
+ LOG.debug('LD(MV) `%s` already unbound?', ldname)
+ return
+
+ # get SV name.
+ snapshotname = (
+ self._common.get_ldname(snapshot['id'],
+ self._properties['ld_backupname_format']))
+ if snapshotname not in lds:
+ LOG.debug('LD(SV) `%s` already unbound?', snapshotname)
+ return
+
+ self._cli.snapshot_delete(ldname, snapshotname[3:])
+
+ LOG.debug('_delete_snapshot(Snapshot ID = %s) End.', snapshot['id'])
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ msgparm = ('Volume ID = %(vol_id)s, '
+ 'Snapshot ID = %(snap_id)s, '
+ 'Snapshot Volume ID = %(snapvol_id)s'
+ % {'vol_id': volume['id'],
+ 'snap_id': snapshot['id'],
+ 'snapvol_id': snapshot['volume_id']})
+ try:
+ self._create_volume_from_snapshot(volume, snapshot)
+ LOG.info(_LI('Created Volume from Snapshot (%s)'), msgparm)
+ except exception.CinderException as e:
+ with excutils.save_and_reraise_exception():
+ LOG.warning(_LW('Failed to Create Volume from Snapshot '
+ '(%(msgparm)s) (%(exception)s)'),
+ {'msgparm': msgparm, 'exception': e})
+
+ def _create_volume_from_snapshot(self, volume, snapshot):
+ LOG.debug('_create_volume_from_snapshot'
+ '(Volume ID = %(vol_id)s, Snapshot ID(SV) = %(snap_id)s, '
+ 'Snapshot ID(BV) = %(snapvol_id)s) Start.',
+ {'vol_id': volume['id'],
+ 'snap_id': snapshot['id'],
+ 'snapvol_id': snapshot['volume_id']})
+ xml = self._cli.view_all(self._properties['ismview_path'])
+ pools, lds, ldsets, used_ldns, hostports, max_ld_count = (
+ self._common.configs(xml))
+
+ # get BV name.
+ mvname = (
+ self._common.get_ldname(snapshot['volume_id'],
+ self._properties['ld_name_format']))
+
+ # get SV name.
+ rvname = (
+ self._common.get_ldname(snapshot['id'],
+ self._properties['ld_backupname_format']))
+
+ if rvname not in lds:
+ msg = _('Logical Disk `%s` is already unbound.') % rvname
+ LOG.error(msg)
+ raise exception.NotFound(msg)
+ rv = lds[rvname]
+
+ # check snapshot status.
+ query_status = self._cli.query_BV_SV_status(mvname[3:], rvname[3:])
+ if query_status != 'snap/active':
+ msg = (_('Cannot create volume from snapshot, '
+ 'because the snapshot data does not exist. '
+ 'bvname=%(bvname)s, svname=%(svname)s') %
+ {'bvname': mvname, 'svname': rvname})
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ mv_capacity = rv['ld_capacity']
+ rv_capacity = volume['size']
+
+ (new_rvname,
+ rvnumber,
+ selected_pool) = self._bind_ld(volume,
+ mv_capacity,
+ None,
+ self._convert_id2name,
+ self._select_volddr_poolnumber,
+ mv_capacity)
+
+ # check io limit.
+ specs = self._common.get_volume_type_qos_specs(volume)
+ self._common.check_io_parameter(specs)
+
+ # set io limit.
+ self._cli.set_io_limit(new_rvname, specs)
+
+ if rv_capacity <= mv_capacity:
+ rvnumber = None
+ rv_capacity = None
+
+ # Restore Start.
+ volume_properties = {
+ 'mvname': rvname,
+ 'rvname': new_rvname,
+ 'prev_mvname': None,
+ 'capacity': mv_capacity,
+ 'mvid': snapshot['id'],
+ 'rvid': volume['id'],
+ 'rvldn': rvnumber,
+ 'rvcapacity': rv_capacity,
+ 'flag': 'esv_restore',
+ 'context': self._context
+ }
+ self._cli.backup_restore(volume_properties,
+ cli.UnpairWaitForDDRRestore)
+
+ LOG.debug('_create_volume_from_snapshot(Volume ID = %(vol_id)s, '
+ 'Snapshot ID(SV) = %(snap_id)s, '
+ 'Snapshot ID(BV) = %(snapvol_id)s, '
+ 'Specs=%(specs)s) End.',
+ {'vol_id': volume['id'],
+ 'snap_id': snapshot['id'],
+ 'snapvol_id': snapshot['volume_id'],
+ 'specs': specs})
diff --git a/releasenotes/notes/nec_storage_volume_driver-57663f9ecce1ae19.yaml b/releasenotes/notes/nec_storage_volume_driver-57663f9ecce1ae19.yaml
new file mode 100644
index 00000000000..d1e75dbf0c4
--- /dev/null
+++ b/releasenotes/notes/nec_storage_volume_driver-57663f9ecce1ae19.yaml
@@ -0,0 +1,3 @@
+---
+features:
+ - Added backend drivers for NEC Storage (FC/iSCSI).