Merge "Huawei driver refactor(2/10)"
This commit is contained in:
commit
7795cd0e23
@ -20,12 +20,14 @@ import json
|
||||
import mock
|
||||
import re
|
||||
import requests
|
||||
import retrying
|
||||
import tempfile
|
||||
from xml.dom import minidom
|
||||
from xml.etree import ElementTree
|
||||
|
||||
from cinder import context
|
||||
from cinder import exception
|
||||
from cinder import objects
|
||||
from cinder.objects import fields
|
||||
from cinder import test
|
||||
from cinder.tests.unit import fake_group
|
||||
@ -74,7 +76,8 @@ ENCODE_NAME = huawei_utils.encode_name(ID)
|
||||
ID2 = 'ee00eb7c-40dc-4256-bfea-6c3a16ab850d'
|
||||
OLD_ENCODE_NAME = huawei_utils.old_encode_name(ID2)
|
||||
|
||||
METADATA = {}
|
||||
METADATA = [{'key': 'huawei_lun_id', 'value': '11'},
|
||||
{'key': 'huawei_lun_wwn', 'value': '6643e8c1004c5f6723e9f454003'}]
|
||||
TEST_PAIR_ID = "3400a30d844d0004"
|
||||
VOL_METADATA = [{'key': 'hypermetro_id', 'value': '11'},
|
||||
{'key': 'remote_lun_id', 'value': '1'}]
|
||||
@ -112,13 +115,13 @@ fake_hypermetro_opts = {'hypermetro': 'true',
|
||||
'thick_provisioning_support': False,
|
||||
}
|
||||
|
||||
sync_replica_specs = {'replication_enabled': '<is> True',
|
||||
sync_replica_specs = {'capabilities:replication_enabled': '<is> True',
|
||||
'replication_type': '<in> sync'}
|
||||
async_replica_specs = {'replication_enabled': '<is> True',
|
||||
async_replica_specs = {'capabilities:replication_enabled': '<is> True',
|
||||
'replication_type': '<in> async'}
|
||||
|
||||
replica_hypermetro_specs = {'hypermetro': '<is> True',
|
||||
'replication_enabled': '<is> True'}
|
||||
replica_hypermetro_specs = {'capabilities:hypermetro': '<is> True',
|
||||
'capabilities:replication_enabled': '<is> True'}
|
||||
|
||||
test_host = {'host': 'ubuntu001@backend001#OpenStack_Pool',
|
||||
'capabilities': {'smartcache': True,
|
||||
@ -149,11 +152,11 @@ test_new_type = {
|
||||
'created_at': None,
|
||||
'updated_at': None,
|
||||
'extra_specs': {
|
||||
'smarttier': '<is> true',
|
||||
'smartcache': '<is> true',
|
||||
'smartpartition': '<is> true',
|
||||
'thin_provisioning_support': '<is> true',
|
||||
'thick_provisioning_support': '<is> False',
|
||||
'capabilities:smarttier': '<is> true',
|
||||
'capabilities:smartcache': '<is> true',
|
||||
'capabilities:smartpartition': '<is> true',
|
||||
'capabilities:thin_provisioning_support': '<is> true',
|
||||
'capabilities:thick_provisioning_support': '<is> False',
|
||||
'policy': '2',
|
||||
'smartcache:cachename': 'cache-test',
|
||||
'smartpartition:partitionname': 'partition-test',
|
||||
@ -171,7 +174,7 @@ test_new_replication_type = {
|
||||
'created_at': None,
|
||||
'updated_at': None,
|
||||
'extra_specs': {
|
||||
'replication_enabled': '<is> True',
|
||||
'capabilities:replication_enabled': '<is> True',
|
||||
'replication_type': '<in> sync',
|
||||
},
|
||||
'is_public': True,
|
||||
@ -187,7 +190,7 @@ test_hypermetro_type = {
|
||||
'created_at': None,
|
||||
'updated_at': None,
|
||||
'extra_specs': {
|
||||
'hypermetro': '<is> True'
|
||||
'capabilities:hypermetro': '<is> True'
|
||||
},
|
||||
'is_public': True,
|
||||
'deleted_at': None,
|
||||
@ -431,7 +434,7 @@ FAKE_LUN_GET_SUCCESS_RESPONSE = """
|
||||
"RUNNINGSTATUS": "10",
|
||||
"HEALTHSTATUS": "1",
|
||||
"RUNNINGSTATUS": "27",
|
||||
"LUNLIST": "",
|
||||
"LUNLIST": "[]",
|
||||
"ALLOCTYPE": "1",
|
||||
"CAPACITY": "2097152",
|
||||
"WRITEPOLICY": "1",
|
||||
@ -455,8 +458,25 @@ FAKE_QUERY_ALL_LUN_RESPONSE = {
|
||||
"code": 0
|
||||
},
|
||||
"data": [{
|
||||
"ID": "1",
|
||||
"NAME": ENCODE_NAME
|
||||
"ID": "11",
|
||||
"NAME": ENCODE_NAME,
|
||||
"WWN": "6643e8c1004c5f6723e9f454003",
|
||||
"DESCRIPTION": "21ec7341-9256-497b-97d9-ef48edcf0635",
|
||||
"HEALTHSTATUS": "1",
|
||||
"RUNNINGSTATUS": "27",
|
||||
"LUNLIST": "[]",
|
||||
"ALLOCTYPE": "1",
|
||||
"CAPACITY": "2097152",
|
||||
"WRITEPOLICY": "1",
|
||||
"MIRRORPOLICY": "0",
|
||||
"PREFETCHPOLICY": "1",
|
||||
"PREFETCHVALUE": "20",
|
||||
"DATATRANSFERPOLICY": "1",
|
||||
"READCACHEPOLICY": "2",
|
||||
"WRITECACHEPOLICY": "5",
|
||||
"OWNINGCONTROLLER": "0B",
|
||||
"SMARTCACHEPARTITIONID": "",
|
||||
"CACHEPARTITIONID": "",
|
||||
}]
|
||||
}
|
||||
|
||||
@ -2283,7 +2303,7 @@ class HuaweiTestBase(test.TestCase):
|
||||
|
||||
self.replica_volume = fake_volume.fake_volume_obj(
|
||||
admin_contex, host=HOST, provider_location=PROVIDER_LOCATION,
|
||||
metadata=METADATA, replication_status='disabled',
|
||||
volume_metadata=METADATA, replication_status='disabled',
|
||||
replication_driver_data=REPLICA_DRIVER_DATA, id=ID)
|
||||
|
||||
self.hyper_volume = fake_volume.fake_volume_obj(
|
||||
@ -2310,6 +2330,13 @@ class HuaweiTestBase(test.TestCase):
|
||||
constants.DEFAULT_WAIT_TIMEOUT = .5
|
||||
constants.MIGRATION_WAIT_INTERVAL = .1
|
||||
|
||||
constants.QOS_SPEC_KEYS = (
|
||||
'maxIOPS', 'minIOPS', 'minBandWidth',
|
||||
'maxBandWidth', 'latency', 'IOType')
|
||||
constants.QOS_IOTYPES = ('0', '1', '2')
|
||||
constants.SUPPORT_LUN_TYPES = ('Thick', 'Thin')
|
||||
constants.DEFAULT_LUN_TYPE = 'Thick'
|
||||
|
||||
def test_encode_name(self):
|
||||
lun_name = huawei_utils.encode_name(self.volume.id)
|
||||
self.assertEqual('21ec7341-ca82ece92e1ac480c963f1', lun_name)
|
||||
@ -2417,13 +2444,14 @@ class HuaweiTestBase(test.TestCase):
|
||||
),
|
||||
'expect': {'huawei_lun_id': '11',
|
||||
'huawei_lun_wwn': 'FAKE_WWN',
|
||||
'huawei_sn': None,
|
||||
'hypermetro_id': '11',
|
||||
'remote_lun_id': '1'}
|
||||
}
|
||||
)
|
||||
@ddt.unpack
|
||||
def test_get_lun_metadata(self, volume, expect):
|
||||
metadata = huawei_utils.get_lun_metadata(volume)
|
||||
def test_get_volume_private_data(self, volume, expect):
|
||||
metadata = huawei_utils.get_volume_private_data(volume)
|
||||
self.assertEqual(expect, metadata)
|
||||
|
||||
@ddt.data(
|
||||
@ -2448,50 +2476,55 @@ class HuaweiTestBase(test.TestCase):
|
||||
{
|
||||
'snapshot': fake_snapshot.fake_snapshot_obj(
|
||||
admin_contex,
|
||||
provider_location='11'),
|
||||
'expect': {'huawei_snapshot_id': '11'}
|
||||
provider_location='11',
|
||||
snapshot_metadata=[{'key': 'huawei_snapshot_wwn',
|
||||
'value': 'fake_wwn'},
|
||||
],
|
||||
expected_attrs=['metadata'],
|
||||
),
|
||||
'expect': {'huawei_snapshot_id': '11',
|
||||
'huawei_snapshot_wwn': 'fake_wwn',
|
||||
}
|
||||
}
|
||||
)
|
||||
@ddt.unpack
|
||||
def test_get_snapshot_metadata(self, snapshot, expect):
|
||||
metadata = huawei_utils.get_snapshot_metadata(snapshot)
|
||||
self.assertEqual(expect, metadata)
|
||||
def test_get_snapshot_private_data(self, snapshot, expect):
|
||||
metadata = huawei_utils.get_snapshot_private_data(snapshot)
|
||||
self.assertDictEqual(expect, metadata)
|
||||
|
||||
@ddt.data(
|
||||
{
|
||||
'volume': fake_volume.fake_volume_obj(
|
||||
admin_contex, provider_location=PROVIDER_LOCATION),
|
||||
'expect': ('11', '6643e8c1004c5f6723e9f454003'),
|
||||
'provider_location': PROVIDER_LOCATION,
|
||||
'mock_func': None,
|
||||
},
|
||||
{
|
||||
'volume': fake_volume.fake_volume_obj(
|
||||
admin_contex, id=ID),
|
||||
'expect': ('1', None),
|
||||
'provider_location': '',
|
||||
'mock_func': None,
|
||||
},
|
||||
{
|
||||
'volume': fake_volume.fake_volume_obj(
|
||||
admin_contex, id=ID2),
|
||||
'expect': ('1', None),
|
||||
'provider_location': PROVIDER_LOCATION,
|
||||
'mock_func': 'get_lun_info_by_name',
|
||||
},
|
||||
{
|
||||
'volume': fake_volume.fake_volume_obj(
|
||||
admin_contex, id='001e7071-413c-4c60-b087-863067ecdd72'),
|
||||
'expect': (None, None),
|
||||
}
|
||||
'provider_location': '{"huawei_lun_wwn": "fake_wwn"}',
|
||||
'mock_func': None,
|
||||
},
|
||||
)
|
||||
@ddt.unpack
|
||||
def test_get_volume_lun_id(self, volume, expect):
|
||||
volume_id = huawei_utils.get_volume_lun_id(self.driver.client,
|
||||
volume)
|
||||
self.assertEqual(expect, volume_id)
|
||||
def test_get_lun_info(self, provider_location, mock_func):
|
||||
volume = fake_volume.fake_volume_obj(
|
||||
admin_contex, id=ID, provider_location=provider_location)
|
||||
|
||||
if mock_func:
|
||||
self.mock_object(self.driver.client, mock_func, return_value=None)
|
||||
lun_info = huawei_utils.get_lun_info(self.driver.client, volume)
|
||||
|
||||
if provider_location in (PROVIDER_LOCATION, ''):
|
||||
self.assertEqual('6643e8c1004c5f6723e9f454003', lun_info['WWN'])
|
||||
else:
|
||||
self.assertIsNone(lun_info)
|
||||
|
||||
@ddt.data(
|
||||
{
|
||||
'snapshot': fake_snapshot.fake_snapshot_obj(
|
||||
admin_contex,
|
||||
provider_location=SNAP_PROVIDER_LOCATION),
|
||||
'expect': '11',
|
||||
},
|
||||
{
|
||||
'snapshot': fake_snapshot.fake_snapshot_obj(
|
||||
admin_contex, id=ID),
|
||||
@ -2509,10 +2542,13 @@ class HuaweiTestBase(test.TestCase):
|
||||
}
|
||||
)
|
||||
@ddt.unpack
|
||||
def test_get_snapshot_id(self, snapshot, expect):
|
||||
snapshot_id = huawei_utils.get_snapshot_id(self.driver.client,
|
||||
snapshot)
|
||||
self.assertEqual(expect, snapshot_id)
|
||||
def test_get_snapshot_info(self, snapshot, expect):
|
||||
snapshot_info = huawei_utils.get_snapshot_info(
|
||||
self.driver.client, snapshot)
|
||||
if expect:
|
||||
self.assertEqual(expect, snapshot_info['ID'])
|
||||
else:
|
||||
self.assertIsNone(snapshot_info)
|
||||
|
||||
@ddt.data(
|
||||
{'host_name': HOST,
|
||||
@ -2654,8 +2690,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
side_effect=[{}, task_info])
|
||||
moved, model_update = self.driver.migrate_volume(None,
|
||||
self.volume,
|
||||
test_host,
|
||||
None)
|
||||
test_host)
|
||||
self.assertTrue(moved)
|
||||
self.assertEqual(empty_dict, model_update)
|
||||
|
||||
@ -2684,8 +2719,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
return_value=task_info)
|
||||
moved, model_update = self.driver.migrate_volume(None,
|
||||
self.replica_volume,
|
||||
test_host,
|
||||
None)
|
||||
test_host)
|
||||
self.assertTrue(moved)
|
||||
self.assertEqual(empty_dict, model_update)
|
||||
|
||||
@ -2712,7 +2746,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
return_value=task_info)
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.migrate_volume,
|
||||
None, self.volume, test_host, None)
|
||||
None, self.volume, test_host)
|
||||
|
||||
@ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT)
|
||||
def test_migrate_volume_fail_no_migrate_task(self, pool_data):
|
||||
@ -2737,11 +2771,12 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
return_value=task_info)
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.migrate_volume,
|
||||
None, self.volume, test_host, None)
|
||||
None, self.volume, test_host)
|
||||
|
||||
@ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT)
|
||||
def test_migrate_volume_with_type_id(self, pool_data):
|
||||
self.driver.support_func = pool_data
|
||||
self.volume.volume_type = None
|
||||
self.volume.volume_type_id = '550c089b-bfdd-4f7f-86e1-3ba88125555c'
|
||||
task_info = {"data": [{"ENDTIME": "1436816174",
|
||||
"ID": "9",
|
||||
@ -2766,8 +2801,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
return_value=task_info)
|
||||
moved, model_update = self.driver.migrate_volume(None,
|
||||
self.volume,
|
||||
test_host,
|
||||
None)
|
||||
test_host)
|
||||
self.assertTrue(moved)
|
||||
self.assertEqual(empty_dict, model_update)
|
||||
|
||||
@ -2807,9 +2841,9 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
|
||||
def test_create_volume_from_snapsuccess(self):
|
||||
self.mock_object(
|
||||
common.HuaweiBaseDriver,
|
||||
'_get_volume_type',
|
||||
return_value={'extra_specs': sync_replica_specs})
|
||||
huawei_utils,
|
||||
'get_volume_params',
|
||||
return_value={'replication_enabled': True})
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'sync')
|
||||
model_update = self.driver.create_volume_from_snapshot(self.volume,
|
||||
self.snapshot)
|
||||
@ -3066,12 +3100,17 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
self.driver.create_volume, self.volume)
|
||||
|
||||
def test_delete_volume_fail(self):
|
||||
self.driver.client.test_fail = True
|
||||
self.driver.delete_volume(self.volume)
|
||||
self.mock_object(
|
||||
self.driver.client, 'delete_lun',
|
||||
side_effect=exception.VolumeBackendAPIException(data=''))
|
||||
self.driver.support_func = {}
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.delete_volume, self.volume)
|
||||
|
||||
def test_delete_snapshot_fail(self):
|
||||
self.driver.client.test_fail = True
|
||||
self.driver.delete_snapshot(self.snapshot)
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.delete_snapshot, self.snapshot)
|
||||
|
||||
def test_delete_snapshot_with_snapshot_nonexistent(self):
|
||||
self.snapshot.provider_location = None
|
||||
@ -3193,14 +3232,11 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
pool_info = self.driver.client.get_pool_info(pool_name, pools)
|
||||
self.assertEqual(test_info, pool_info)
|
||||
|
||||
def test_get_smartx_specs_opts(self):
|
||||
smartx_opts = smartx.SmartX().get_smartx_specs_opts(smarttier_opts)
|
||||
self.assertEqual('3', smartx_opts['policy'])
|
||||
|
||||
@ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT)
|
||||
@mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type',
|
||||
return_value={'MAXIOPS': '100',
|
||||
'IOType': '2'})
|
||||
@mock.patch.object(huawei_utils, 'get_volume_params',
|
||||
return_value={'qos': {'MAXIOPS': '100',
|
||||
'IOType': '2'}
|
||||
})
|
||||
def test_create_smartqos(self, mock_qos_value, pool_data):
|
||||
self.driver.support_func = pool_data
|
||||
lun_info = self.driver.create_volume(self.volume)
|
||||
@ -3210,7 +3246,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
json.loads(lun_info['provider_location']))
|
||||
|
||||
@ddt.data('front-end', 'back-end')
|
||||
@mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params',
|
||||
@mock.patch.object(huawei_utils, 'get_volume_params',
|
||||
return_value={'smarttier': 'true',
|
||||
'smartcache': 'true',
|
||||
'smartpartition': 'true',
|
||||
@ -3247,49 +3283,29 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
@ddt.unpack
|
||||
def test_create_smartqos_failed(self, qos_specs_value, pool_data):
|
||||
self.driver.support_func = pool_data
|
||||
self.mock_object(
|
||||
common.HuaweiBaseDriver,
|
||||
'_get_volume_params',
|
||||
return_value={'smarttier': 'true',
|
||||
'smartcache': 'true',
|
||||
'smartpartition': 'true',
|
||||
'thin_provisioning_support': 'true',
|
||||
'thick_provisioning_support': 'false',
|
||||
'policy': '2',
|
||||
'cachename': 'cache-test',
|
||||
'partitionname': 'partition-test'})
|
||||
self.mock_object(common.HuaweiBaseDriver, '_get_volume_type',
|
||||
return_value={'qos_specs_id': u'025ce295-15e9-41a7'})
|
||||
self.mock_object(qos_specs, 'get_qos_specs',
|
||||
return_value=qos_specs_value)
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.volume.volume_type = objects.VolumeType(
|
||||
extra_specs={}, qos_specs_id=ID)
|
||||
self.assertRaises(exception.InvalidInput,
|
||||
self.driver.create_volume, self.volume)
|
||||
|
||||
@ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT)
|
||||
def test_create_smartqos_without_huawei_type(self, pool_data):
|
||||
self.driver.support_func = pool_data
|
||||
self.mock_object(
|
||||
common.HuaweiBaseDriver,
|
||||
'_get_volume_params',
|
||||
return_value={'smarttier': 'true',
|
||||
'smartcache': 'true',
|
||||
'smartpartition': 'true',
|
||||
'thin_provisioning_support': 'true',
|
||||
'thick_provisioning_support': 'false',
|
||||
'policy': '2',
|
||||
'cachename': 'cache-test',
|
||||
'partitionname': 'partition-test'})
|
||||
self.mock_object(common.HuaweiBaseDriver, '_get_volume_type',
|
||||
return_value={'qos_specs_id': u'025ce295-15e9-41a7'})
|
||||
self.mock_object(qos_specs, 'get_qos_specs',
|
||||
return_value={'specs': {'fake_qos_type': '100',
|
||||
'IOType': '2'}})
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
'IOType': '2'}}
|
||||
)
|
||||
self.volume.volume_type = objects.VolumeType(
|
||||
extra_specs={}, qos_specs_id=ID)
|
||||
self.assertRaises(exception.InvalidInput,
|
||||
self.driver.create_volume, self.volume)
|
||||
|
||||
@mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type',
|
||||
return_value={'MAXIOPS': '100',
|
||||
'IOType': '2'})
|
||||
@mock.patch.object(huawei_utils, 'get_volume_params',
|
||||
return_value={'qos': {'MAXIOPS': '100',
|
||||
'IOType': '2'}
|
||||
})
|
||||
@mock.patch.object(rest_client.RestClient, 'find_array_version',
|
||||
return_value='V300R003C00')
|
||||
@mock.patch.object(rest_client.RestClient, 'find_available_qos',
|
||||
@ -3305,9 +3321,10 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
self.assertDictEqual(expect_value,
|
||||
json.loads(lun_info['provider_location']))
|
||||
|
||||
@mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type',
|
||||
return_value={'MINIOPS': '100',
|
||||
'IOType': '2'})
|
||||
@mock.patch.object(huawei_utils, 'get_volume_params',
|
||||
return_value={'qos': {'MAXIOPS': '100',
|
||||
'IOType': '2'}
|
||||
})
|
||||
@mock.patch.object(rest_client.RestClient, 'find_array_version',
|
||||
return_value='V300R003C00')
|
||||
@mock.patch.object(rest_client.RestClient, 'find_available_qos',
|
||||
@ -3323,51 +3340,33 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
self.assertDictEqual(expect_value,
|
||||
json.loads(lun_info['provider_location']))
|
||||
|
||||
@mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type',
|
||||
return_value={'MINIOPS': '100',
|
||||
'IOType': '2'})
|
||||
@mock.patch.object(rest_client.RestClient, 'find_array_version',
|
||||
return_value='V300R003C00')
|
||||
@mock.patch.object(rest_client.RestClient, 'find_available_qos',
|
||||
return_value=('11', u'["0", "2", "3"]'))
|
||||
def test_create_smartqos_on_v3r3_with_unsupport_qos(
|
||||
self, mock_find_available_qos,
|
||||
mock_qos_value, mock_array_version):
|
||||
self.driver.support_func = FAKE_POOLS_UNSUPPORT_REPORT
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.create_volume, self.volume)
|
||||
|
||||
@ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT)
|
||||
@mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type',
|
||||
return_value={'MINIOPS': '100',
|
||||
'IOType': '2'})
|
||||
@mock.patch.object(rest_client.RestClient, 'find_array_version',
|
||||
return_value='V300R003C00')
|
||||
@mock.patch.object(rest_client.RestClient, 'find_available_qos',
|
||||
return_value=(None, []))
|
||||
@mock.patch.object(rest_client.RestClient, 'activate_deactivate_qos')
|
||||
def test_create_smartqos_on_v3r3_active_failed(self,
|
||||
pool_data,
|
||||
mock_activate_qos,
|
||||
mock_find_available_qos,
|
||||
mock_qos_value,
|
||||
mock_array_version):
|
||||
def test_create_smartqos_on_v3r3_active_failed(self, pool_data):
|
||||
self.driver.support_func = pool_data
|
||||
mock_activate_qos.side_effect = (
|
||||
exception.VolumeBackendAPIException(data='Activate or deactivate '
|
||||
'QoS error. '))
|
||||
|
||||
self.mock_object(huawei_utils, 'get_volume_params',
|
||||
return_value={'qos': {'MAXIOPS': '100',
|
||||
'IOType': '2'}
|
||||
}
|
||||
)
|
||||
self.mock_object(self.driver.client, 'activate_deactivate_qos',
|
||||
side_effect=exception.VolumeBackendAPIException(
|
||||
data='Activate or deactivate QoS error.')
|
||||
)
|
||||
self.mock_object(smartx.SmartQos, 'remove')
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.create_volume, self.volume)
|
||||
|
||||
@ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT)
|
||||
@mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type',
|
||||
return_value={'MINIOPS': '100',
|
||||
'IOType': '2'})
|
||||
@mock.patch.object(huawei_utils, 'get_volume_params',
|
||||
return_value={'qos': {'MAXIOPS': '100',
|
||||
'IOType': '2'}
|
||||
})
|
||||
@mock.patch.object(rest_client.RestClient, 'find_array_version',
|
||||
return_value='V300R003C00')
|
||||
@mock.patch.object(rest_client.RestClient, 'find_available_qos',
|
||||
return_value=(None, []))
|
||||
@mock.patch.object(rest_client.RestClient, 'create_qos_policy')
|
||||
@mock.patch.object(rest_client.RestClient, 'create_qos')
|
||||
def test_create_smartqos_on_v3r3_qos_failed(self,
|
||||
pool_data,
|
||||
mock_create_qos,
|
||||
@ -3398,7 +3397,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
self.driver.delete_volume(self.volume)
|
||||
|
||||
@mock.patch.object(rest_client.RestClient, 'add_lun_to_partition')
|
||||
@mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params',
|
||||
@mock.patch.object(huawei_utils, 'get_volume_params',
|
||||
return_value={'smarttier': 'true',
|
||||
'smartcache': 'true',
|
||||
'smartpartition': 'true',
|
||||
@ -3414,46 +3413,20 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
self.assertDictEqual(expect_value,
|
||||
json.loads(lun_info['provider_location']))
|
||||
|
||||
@ddt.data([{'smarttier': 'true', 'smartcache': 'true',
|
||||
'smartpartition': 'true',
|
||||
'thin_provisioning_support': 'true',
|
||||
'thick_provisioning_support': 'false',
|
||||
'policy': '2', 'cachename': None,
|
||||
'partitionname': 'partition-test'},
|
||||
FAKE_POOLS_UNSUPPORT_REPORT],
|
||||
[{'smarttier': 'true', 'smartcache': 'true',
|
||||
'smartpartition': 'true',
|
||||
'thin_provisioning_support': 'true',
|
||||
'thick_provisioning_support': 'false',
|
||||
'policy': '2', 'cachename': 'cache-test',
|
||||
'partitionname': None},
|
||||
FAKE_POOLS_SUPPORT_REPORT],
|
||||
[{'smarttier': 'true', 'smartcache': 'true',
|
||||
'smartpartition': 'true',
|
||||
'thin_provisioning_support': 'true',
|
||||
'thick_provisioning_support': 'false',
|
||||
'policy': '2', 'cachename': None,
|
||||
'partitionname': 'partition-test'},
|
||||
FAKE_POOLS_SUPPORT_REPORT],
|
||||
[{'smarttier': 'true', 'smartcache': 'true',
|
||||
'smartpartition': 'true',
|
||||
'thin_provisioning_support': 'true',
|
||||
'thick_provisioning_support': 'false',
|
||||
'policy': '2', 'cachename': 'cache-test',
|
||||
'partitionname': None},
|
||||
FAKE_POOLS_UNSUPPORT_REPORT])
|
||||
@ddt.unpack
|
||||
def test_create_smartCache_failed(self, opts, pool_data):
|
||||
self.driver.support_func = pool_data
|
||||
self.mock_object(
|
||||
common.HuaweiBaseDriver,
|
||||
'_get_volume_params',
|
||||
return_value=opts)
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
@ddt.data({'capabilities:smartcache': 'true',
|
||||
'cachename': 'fake_name'},
|
||||
{'capabilities:smartcache': '<is> true',
|
||||
'cachename': None},
|
||||
{'capabilities:smartcache': '<is> true',
|
||||
'cachename': ''},
|
||||
)
|
||||
def test_create_smartCache_failed(self, extra_specs):
|
||||
self.volume.volume_type = objects.VolumeType(extra_specs=extra_specs)
|
||||
self.assertRaises(exception.InvalidInput,
|
||||
self.driver.create_volume, self.volume)
|
||||
|
||||
@ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT)
|
||||
@mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params',
|
||||
@mock.patch.object(huawei_utils, 'get_volume_params',
|
||||
return_value={'smarttier': 'true',
|
||||
'smartcache': 'true',
|
||||
'smartpartition': 'true',
|
||||
@ -3471,7 +3444,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
self.driver.create_volume, self.volume)
|
||||
|
||||
@ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT)
|
||||
@mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params',
|
||||
@mock.patch.object(huawei_utils, 'get_volume_params',
|
||||
return_value={'smarttier': 'true',
|
||||
'smartcache': 'true',
|
||||
'smartpartition': 'true',
|
||||
@ -3539,7 +3512,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
(qos_id, lun_list) = self.driver.client.find_available_qos(qos)
|
||||
self.assertEqual(("11", u'["0", "1", "2"]'), (qos_id, lun_list))
|
||||
|
||||
@mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params',
|
||||
@mock.patch.object(huawei_utils, 'get_volume_params',
|
||||
return_value=fake_hypermetro_opts)
|
||||
@mock.patch.object(rest_client.RestClient, 'get_all_pools',
|
||||
return_value=FAKE_STORAGE_POOL_RESPONSE)
|
||||
@ -3564,7 +3537,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
json.loads(lun_info['provider_location']))
|
||||
|
||||
@ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT)
|
||||
@mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params',
|
||||
@mock.patch.object(huawei_utils, 'get_volume_params',
|
||||
return_value=fake_hypermetro_opts)
|
||||
@mock.patch.object(rest_client.RestClient, 'get_all_pools',
|
||||
return_value=FAKE_STORAGE_POOL_RESPONSE)
|
||||
@ -4010,10 +3983,8 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
@ddt.data(sync_replica_specs, async_replica_specs)
|
||||
def test_create_replication_success(self, mock_type):
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'sync')
|
||||
self.mock_object(
|
||||
common.HuaweiBaseDriver,
|
||||
'_get_volume_type',
|
||||
return_value={'extra_specs': mock_type})
|
||||
self.replica_volume.volume_type = objects.VolumeType(
|
||||
extra_specs=mock_type, qos_specs_id=None)
|
||||
|
||||
model_update = self.driver.create_volume(self.replica_volume)
|
||||
driver_data = {'pair_id': TEST_PAIR_ID,
|
||||
@ -4113,10 +4084,9 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
def test_create_replication_fail(self, mock_module, mock_func,
|
||||
mock_value, pool_data):
|
||||
self.driver.support_func = pool_data
|
||||
self.mock_object(
|
||||
common.HuaweiBaseDriver,
|
||||
'_get_volume_type',
|
||||
return_value={'extra_specs': sync_replica_specs})
|
||||
self.replica_volume.volume_type = objects.VolumeType(
|
||||
extra_specs=sync_replica_specs, qos_specs_id=None)
|
||||
|
||||
self.mock_object(replication.ReplicaPairManager, '_delete_pair')
|
||||
self.mock_object(mock_module, mock_func, mock_value)
|
||||
self.assertRaises(
|
||||
@ -4150,7 +4120,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
|
||||
with mock.patch.object(rest_client.RestClient, 'get_lun_info',
|
||||
return_value=offline_status):
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.assertRaises(retrying.RetryError,
|
||||
replica.wait_volume_online,
|
||||
self.driver.client,
|
||||
lun_info)
|
||||
@ -4165,7 +4135,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
return_value={'SECRESACCESS': access_ro})
|
||||
|
||||
common_driver.wait_second_access(pair_id, access_ro)
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.assertRaises(retrying.RetryError,
|
||||
common_driver.wait_second_access, pair_id, access_rw)
|
||||
|
||||
def test_wait_replica_ready(self):
|
||||
@ -4282,8 +4252,8 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
old_replica_client = driver.replica_client
|
||||
old_replica = driver.replica
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'failover')
|
||||
self.mock_object(common.HuaweiBaseDriver, '_get_volume_params',
|
||||
return_value={'replication_enabled': 'true'})
|
||||
self.mock_object(huawei_utils, 'get_volume_params',
|
||||
return_value={'replication_enabled': True})
|
||||
secondary_id, volumes_update, __ = driver.failover_host(
|
||||
None, [self.replica_volume], REPLICA_BACKEND_ID, [])
|
||||
self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id)
|
||||
@ -4302,7 +4272,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
expect_location, json.loads(v_update['provider_location']))
|
||||
self.assertEqual('failed-over', v_update['replication_status'])
|
||||
|
||||
metadata = huawei_utils.get_lun_metadata(self.replica_volume)
|
||||
metadata = huawei_utils.get_volume_metadata(self.replica_volume)
|
||||
new_drv_data = {'pair_id': TEST_PAIR_ID,
|
||||
'rmt_lun_id': metadata['huawei_lun_id'],
|
||||
'rmt_lun_wwn': metadata['huawei_lun_wwn']}
|
||||
@ -4319,8 +4289,8 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
old_client = driver.client
|
||||
old_replica_client = driver.replica_client
|
||||
old_replica = driver.replica
|
||||
self.mock_object(common.HuaweiBaseDriver, '_get_volume_params',
|
||||
return_value={'replication_enabled': 'true'})
|
||||
self.mock_object(huawei_utils, 'get_volume_params',
|
||||
return_value={'replication_enabled': True})
|
||||
secondary_id, volumes_update, __ = driver.failover_host(
|
||||
None, [volume], REPLICA_BACKEND_ID, [])
|
||||
self.assertEqual(driver.active_backend_id, REPLICA_BACKEND_ID)
|
||||
@ -4338,8 +4308,8 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'enable')
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'wait_replica_ready')
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'failover')
|
||||
self.mock_object(common.HuaweiBaseDriver, '_get_volume_params',
|
||||
return_value={'replication_enabled': 'true'})
|
||||
self.mock_object(huawei_utils, 'get_volume_params',
|
||||
return_value={'replication_enabled': True})
|
||||
|
||||
volume = self.replica_volume
|
||||
|
||||
@ -4367,7 +4337,7 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
expect_location, json.loads(v_update['provider_location']))
|
||||
self.assertEqual('available', v_update['replication_status'])
|
||||
|
||||
metadata = huawei_utils.get_lun_metadata(self.replica_volume)
|
||||
metadata = huawei_utils.get_volume_metadata(self.replica_volume)
|
||||
new_drv_data = {'pair_id': TEST_PAIR_ID,
|
||||
'rmt_lun_id': metadata['huawei_lun_id'],
|
||||
'rmt_lun_wwn': metadata['huawei_lun_wwn']}
|
||||
@ -4376,8 +4346,8 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
|
||||
|
||||
@ddt.data({}, {'pair_id': TEST_PAIR_ID})
|
||||
def test_failback_replica_volumes_invalid_drv_data(self, mock_drv_data):
|
||||
self.mock_object(common.HuaweiBaseDriver, '_get_volume_params',
|
||||
return_value={'replication_enabled': 'true'})
|
||||
self.mock_object(huawei_utils, 'get_volume_params',
|
||||
return_value={'replication_enabled': True})
|
||||
|
||||
volume = self.replica_volume
|
||||
volume['replication_driver_data'] = replication.to_string(
|
||||
@ -4840,14 +4810,6 @@ class HuaweiFCDriverTestCase(HuaweiTestBase):
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.create_volume, self.volume)
|
||||
|
||||
def test_delete_volume_fail(self):
|
||||
self.driver.client.test_fail = True
|
||||
self.driver.delete_volume(self.volume)
|
||||
|
||||
def test_delete_snapshot_fail(self):
|
||||
self.driver.client.test_fail = True
|
||||
self.driver.delete_snapshot(self.snapshot)
|
||||
|
||||
def test_initialize_connection_fail(self):
|
||||
self.driver.client.test_fail = True
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
@ -4876,26 +4838,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase):
|
||||
self.driver.support_func = pool_data
|
||||
moved, model_update = self.driver.migrate_volume(None,
|
||||
self.volume,
|
||||
test_host,
|
||||
None)
|
||||
self.assertTrue(moved)
|
||||
self.assertEqual(empty_dict, model_update)
|
||||
|
||||
# Migrate volume with new type.
|
||||
empty_dict = {}
|
||||
new_type = {'extra_specs':
|
||||
{'smarttier': '<is> true',
|
||||
'smartcache': '<is> true',
|
||||
'smartpartition': '<is> true',
|
||||
'thin_provisioning_support': '<is> true',
|
||||
'thick_provisioning_support': '<is> False',
|
||||
'policy': '2',
|
||||
'smartcache:cachename': 'cache-test',
|
||||
'smartpartition:partitionname': 'partition-test'}}
|
||||
moved, model_update = self.driver.migrate_volume(None,
|
||||
self.volume,
|
||||
test_host,
|
||||
new_type)
|
||||
test_host)
|
||||
self.assertTrue(moved)
|
||||
self.assertEqual(empty_dict, model_update)
|
||||
|
||||
@ -4905,21 +4848,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase):
|
||||
# Migrate volume without new type.
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.migrate_volume, None,
|
||||
self.volume, test_host, None)
|
||||
|
||||
# Migrate volume with new type.
|
||||
new_type = {'extra_specs':
|
||||
{'smarttier': '<is> true',
|
||||
'smartcache': '<is> true',
|
||||
'thin_provisioning_support': '<is> true',
|
||||
'thick_provisioning_support': '<is> False',
|
||||
'policy': '2',
|
||||
'smartcache:cachename': 'cache-test',
|
||||
'partitionname': 'partition-test'}}
|
||||
self.driver.client.test_fail = True
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.migrate_volume, None,
|
||||
self.volume, test_host, new_type)
|
||||
self.volume, test_host)
|
||||
|
||||
def test_check_migration_valid(self):
|
||||
is_valid = self.driver._check_migration_valid(test_host,
|
||||
@ -5060,12 +4989,19 @@ class HuaweiFCDriverTestCase(HuaweiTestBase):
|
||||
self.driver.support_func = pool_data
|
||||
self.mock_object(mock_module, mock_func, side_effect=side_effect)
|
||||
self.mock_object(rest_client.RestClient, 'add_lun_to_partition')
|
||||
self.mock_object(
|
||||
common.HuaweiBaseDriver,
|
||||
'_get_volume_type',
|
||||
return_value={'extra_specs': sync_replica_specs})
|
||||
|
||||
if mock_func == 'create_replica':
|
||||
extra_specs = {}
|
||||
new_type = test_new_replication_type
|
||||
else:
|
||||
extra_specs = {'capabilities:replication_enabled': '<is> True'}
|
||||
new_type = {'extra_specs': {},
|
||||
}
|
||||
|
||||
self.volume.volume_type = objects.VolumeType(
|
||||
extra_specs=extra_specs, qos_specs_id=None)
|
||||
retype = self.driver.retype(None, self.volume,
|
||||
test_new_replication_type, None, test_host)
|
||||
new_type, None, test_host)
|
||||
self.assertFalse(retype)
|
||||
|
||||
@ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT)
|
||||
@ -5088,7 +5024,6 @@ class HuaweiFCDriverTestCase(HuaweiTestBase):
|
||||
|
||||
@mock.patch.object(rest_client.RestClient, 'add_lun_to_partition')
|
||||
def test_retype_volume_fail(self, mock_add_lun_to_partition):
|
||||
|
||||
self.driver.support_func = FAKE_POOLS_SUPPORT_REPORT
|
||||
mock_add_lun_to_partition.side_effect = (
|
||||
exception.VolumeBackendAPIException(data='Error occurred.'))
|
||||
@ -5240,7 +5175,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase):
|
||||
None)
|
||||
self.assertEqual(expected_pool_capacity, pool_capacity)
|
||||
|
||||
@mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params',
|
||||
@mock.patch.object(huawei_utils, 'get_volume_params',
|
||||
return_value=fake_hypermetro_opts)
|
||||
@mock.patch.object(rest_client.RestClient, 'get_all_pools',
|
||||
return_value=FAKE_STORAGE_POOL_RESPONSE)
|
||||
@ -5273,7 +5208,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase):
|
||||
json.loads(lun_info['provider_location']))
|
||||
|
||||
@ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT)
|
||||
@mock.patch.object(common.HuaweiBaseDriver, '_get_volume_params',
|
||||
@mock.patch.object(huawei_utils, 'get_volume_params',
|
||||
return_value=fake_hypermetro_opts)
|
||||
@mock.patch.object(rest_client.RestClient, 'get_all_pools',
|
||||
return_value=FAKE_STORAGE_POOL_RESPONSE)
|
||||
@ -5299,7 +5234,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase):
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.metro.create_hypermetro, "11", {})
|
||||
|
||||
@mock.patch.object(huawei_driver.huawei_utils, 'get_lun_metadata',
|
||||
@mock.patch.object(huawei_utils, 'get_volume_private_data',
|
||||
return_value={'hypermetro_id': '3400a30d844d0007',
|
||||
'remote_lun_id': '1'})
|
||||
@mock.patch.object(rest_client.RestClient, 'do_mapping',
|
||||
@ -5313,13 +5248,13 @@ class HuaweiFCDriverTestCase(HuaweiTestBase):
|
||||
mock_map.assert_called_once_with('1', '0', '1',
|
||||
hypermetro_lun=True)
|
||||
|
||||
@mock.patch.object(huawei_driver.huawei_utils, 'get_lun_metadata',
|
||||
@mock.patch.object(huawei_driver.huawei_utils, 'get_volume_metadata',
|
||||
return_value={'hypermetro_id': '3400a30d844d0007',
|
||||
'remote_lun_id': '1'})
|
||||
def test_terminate_hypermetro_connection_success(self, mock_metradata):
|
||||
self.driver.metro.disconnect_volume_fc(self.volume, FakeConnector)
|
||||
|
||||
@mock.patch.object(huawei_driver.huawei_utils, 'get_lun_metadata',
|
||||
@mock.patch.object(huawei_utils, 'get_volume_private_data',
|
||||
return_value={'hypermetro_id': '3400a30d844d0007',
|
||||
'remote_lun_id': None})
|
||||
@mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name',
|
||||
@ -5334,7 +5269,7 @@ class HuaweiFCDriverTestCase(HuaweiTestBase):
|
||||
flag = self.driver.metro._wait_volume_ready("11")
|
||||
self.assertIsNone(flag)
|
||||
|
||||
@mock.patch.object(huawei_driver.huawei_utils, 'get_lun_metadata',
|
||||
@mock.patch.object(huawei_driver.huawei_utils, 'get_volume_metadata',
|
||||
return_value={'hypermetro_id': '3400a30d844d0007',
|
||||
'remote_lun_id': '1'})
|
||||
@mock.patch.object(rest_client.RestClient, 'get_online_free_wwns',
|
||||
@ -5350,17 +5285,15 @@ class HuaweiFCDriverTestCase(HuaweiTestBase):
|
||||
FakeConnector)
|
||||
|
||||
def test_create_snapshot_fail_hypermetro(self):
|
||||
self.mock_object(
|
||||
common.HuaweiBaseDriver,
|
||||
'_get_volume_type',
|
||||
return_value={'extra_specs': replica_hypermetro_specs})
|
||||
self.volume.volume_type = objects.VolumeType(
|
||||
extra_specs=replica_hypermetro_specs, qos_specs_id=None)
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.create_volume_from_snapshot,
|
||||
self.volume, self.snapshot)
|
||||
|
||||
def test_create_snapshot_fail_no_snapshot_id(self):
|
||||
self.snapshot.provider_location = None
|
||||
self.mock_object(rest_client.RestClient, 'get_snapshot_id_by_name',
|
||||
self.mock_object(self.driver.client, 'get_snapshot_info_by_name',
|
||||
return_value=None)
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
self.driver.create_volume_from_snapshot,
|
||||
@ -5438,6 +5371,8 @@ class HuaweiFCDriverTestCase(HuaweiTestBase):
|
||||
delete_snap_mock = self.mock_object(
|
||||
self.driver, '_delete_group_snapshot',
|
||||
wraps=self.driver._delete_group_snapshot)
|
||||
self.mock_object(self.driver.client, 'get_snapshot_info_by_name',
|
||||
return_value={'ID': ID})
|
||||
|
||||
model_update, volumes_model_update = self.driver.create_group_from_src(
|
||||
None, self.group, [self.volume], snapshots=snapshots,
|
||||
@ -5469,13 +5404,13 @@ class HuaweiFCDriverTestCase(HuaweiTestBase):
|
||||
|
||||
@mock.patch.object(common.HuaweiBaseDriver, '_get_group_type',
|
||||
return_value=[{"hypermetro": "true"}])
|
||||
@mock.patch.object(huawei_driver.huawei_utils, 'get_lun_metadata',
|
||||
@mock.patch.object(huawei_driver.huawei_utils, 'get_volume_metadata',
|
||||
return_value={'hypermetro_id': '3400a30d844d0007',
|
||||
'remote_lun_id': '59'})
|
||||
def test_update_group_success(self, mock_grouptype, mock_metadata):
|
||||
ctxt = context.get_admin_context()
|
||||
add_volumes = [self.volume]
|
||||
remove_volumes = [self.volume]
|
||||
add_volumes = [self.hyper_volume]
|
||||
remove_volumes = []
|
||||
self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type',
|
||||
return_value=True)
|
||||
model_update = self.driver.update_group(
|
||||
|
@ -221,90 +221,6 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
|
||||
return volume_type
|
||||
|
||||
def _get_volume_params(self, volume_type):
|
||||
"""Return the parameters for creating the volume."""
|
||||
specs = {}
|
||||
if volume_type:
|
||||
specs = dict(volume_type).get('extra_specs')
|
||||
|
||||
opts = self._get_volume_params_from_specs(specs)
|
||||
return opts
|
||||
|
||||
def _get_volume_params_from_specs(self, specs):
|
||||
"""Return the volume parameters from extra specs."""
|
||||
opts_capability = {
|
||||
'smarttier': False,
|
||||
'smartcache': False,
|
||||
'smartpartition': False,
|
||||
'thin_provisioning_support': False,
|
||||
'thick_provisioning_support': False,
|
||||
'hypermetro': False,
|
||||
'replication_enabled': False,
|
||||
'replication_type': 'async',
|
||||
}
|
||||
|
||||
opts_value = {
|
||||
'policy': None,
|
||||
'partitionname': None,
|
||||
'cachename': None,
|
||||
}
|
||||
|
||||
opts_associate = {
|
||||
'smarttier': 'policy',
|
||||
'smartcache': 'cachename',
|
||||
'smartpartition': 'partitionname',
|
||||
}
|
||||
|
||||
opts = self._get_opts_from_specs(opts_capability,
|
||||
opts_value,
|
||||
opts_associate,
|
||||
specs)
|
||||
opts = smartx.SmartX().get_smartx_specs_opts(opts)
|
||||
opts = replication.get_replication_opts(opts)
|
||||
LOG.debug('volume opts %(opts)s.', {'opts': opts})
|
||||
return opts
|
||||
|
||||
def _get_opts_from_specs(self, opts_capability, opts_value,
|
||||
opts_associate, specs):
|
||||
"""Get the well defined extra specs."""
|
||||
opts = {}
|
||||
opts.update(opts_capability)
|
||||
opts.update(opts_value)
|
||||
|
||||
for key, value in specs.items():
|
||||
# Get the scope, if it is using scope format.
|
||||
scope = None
|
||||
key_split = key.split(':')
|
||||
if len(key_split) > 2 and key_split[0] != "capabilities":
|
||||
continue
|
||||
|
||||
if len(key_split) == 1:
|
||||
key = key_split[0].lower()
|
||||
else:
|
||||
scope = key_split[0].lower()
|
||||
key = key_split[1].lower()
|
||||
|
||||
if ((not scope or scope == 'capabilities')
|
||||
and key in opts_capability):
|
||||
words = value.split()
|
||||
if words and len(words) == 2 and words[0] in ('<is>', '<in>'):
|
||||
opts[key] = words[1].lower()
|
||||
elif key == 'replication_type':
|
||||
LOG.error("Extra specs must be specified as "
|
||||
"replication_type='<in> sync' or "
|
||||
"'<in> async'.")
|
||||
else:
|
||||
LOG.error("Extra specs must be specified as "
|
||||
"capabilities:%s='<is> True'.", key)
|
||||
|
||||
if ((scope in opts_capability)
|
||||
and (key in opts_value)
|
||||
and (scope in opts_associate)
|
||||
and (opts_associate[scope] == key)):
|
||||
opts[key] = value
|
||||
|
||||
return opts
|
||||
|
||||
def _get_lun_params(self, volume, opts):
|
||||
pool_name = volume_utils.extract_host(volume.host, level='pool')
|
||||
params = {
|
||||
@ -314,7 +230,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
'PARENTID': self.client.get_pool_id(pool_name),
|
||||
'DESCRIPTION': volume.name,
|
||||
'ALLOCTYPE': opts.get('LUNType', self.configuration.lun_type),
|
||||
'CAPACITY': huawei_utils.get_volume_size(volume),
|
||||
'CAPACITY': int(volume.size) * constants.CAPACITY_UNIT,
|
||||
'READCACHEPOLICY': self.configuration.lun_read_cache_policy,
|
||||
'WRITECACHEPOLICY': self.configuration.lun_write_cache_policy,
|
||||
}
|
||||
@ -341,7 +257,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
|
||||
return lun_info, model_update
|
||||
|
||||
def _create_base_type_volume(self, opts, volume, volume_type):
|
||||
def _create_base_type_volume(self, opts, volume):
|
||||
"""Create volume and add some base type.
|
||||
|
||||
Base type is the service type which doesn't conflict with the other.
|
||||
@ -351,21 +267,17 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
lun_id = lun_info['ID']
|
||||
|
||||
try:
|
||||
qos = smartx.SmartQos.get_qos_by_volume_type(volume_type)
|
||||
if qos:
|
||||
if not self.support_func.get('QoS_support'):
|
||||
msg = (_("Can't support qos on the array"))
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
else:
|
||||
smart_qos = smartx.SmartQos(self.client)
|
||||
smart_qos.add(qos, lun_id)
|
||||
if opts.get('qos'):
|
||||
smartqos = smartx.SmartQos(self.client)
|
||||
smartqos.add(opts['qos'], lun_id)
|
||||
|
||||
smartpartition = smartx.SmartPartition(self.client)
|
||||
smartpartition.add(opts, lun_id)
|
||||
if opts.get('smartpartition'):
|
||||
smartpartition = smartx.SmartPartition(self.client)
|
||||
smartpartition.add(opts['partitionname'], lun_id)
|
||||
|
||||
smartcache = smartx.SmartCache(self.client)
|
||||
smartcache.add(opts, lun_id)
|
||||
if opts.get('smartcache'):
|
||||
smartcache = smartx.SmartCache(self.client)
|
||||
smartcache.add(opts['cachename'], lun_id)
|
||||
except Exception as err:
|
||||
self._delete_lun_with_check(lun_id)
|
||||
msg = _('Create volume error. Because %s.') % six.text_type(err)
|
||||
@ -381,7 +293,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
So add it after those services.
|
||||
"""
|
||||
lun_id = lun_info['ID']
|
||||
if opts.get('hypermetro') == 'true':
|
||||
if opts.get('hypermetro'):
|
||||
metro = hypermetro.HuaweiHyperMetro(self.client,
|
||||
self.rmt_client,
|
||||
self.configuration)
|
||||
@ -393,7 +305,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
self._delete_lun_with_check(lun_id)
|
||||
raise
|
||||
|
||||
if opts.get('replication_enabled') == 'true':
|
||||
if opts.get('replication_enabled'):
|
||||
replica_model = opts.get('replication_type')
|
||||
try:
|
||||
replica_info = self.replica.create_replica(lun_info,
|
||||
@ -408,17 +320,15 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
|
||||
def create_volume(self, volume):
|
||||
"""Create a volume."""
|
||||
volume_type = self._get_volume_type(volume)
|
||||
opts = self._get_volume_params(volume_type)
|
||||
if (opts.get('hypermetro') == 'true'
|
||||
and opts.get('replication_enabled') == 'true'):
|
||||
opts = huawei_utils.get_volume_params(volume)
|
||||
if opts.get('hypermetro') and opts.get('replication_enabled'):
|
||||
err_msg = _("Hypermetro and Replication can not be "
|
||||
"used in the same volume_type.")
|
||||
LOG.error(err_msg)
|
||||
raise exception.VolumeBackendAPIException(data=err_msg)
|
||||
|
||||
lun_params, lun_info, model_update = (
|
||||
self._create_base_type_volume(opts, volume, volume_type))
|
||||
lun_params, lun_info, model_update = self._create_base_type_volume(
|
||||
opts, volume)
|
||||
|
||||
model_update = self._add_extend_type_to_volume(opts, lun_params,
|
||||
lun_info, model_update)
|
||||
@ -429,10 +339,11 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
return model_update
|
||||
|
||||
def _delete_volume(self, volume):
|
||||
lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume)
|
||||
if not lun_id:
|
||||
lun_info = huawei_utils.get_lun_info(self.client, volume)
|
||||
if not lun_info:
|
||||
return
|
||||
|
||||
lun_id = lun_info['ID']
|
||||
lun_group_ids = self.client.get_lungroupids_by_lunid(lun_id)
|
||||
if lun_group_ids and len(lun_group_ids) == 1:
|
||||
self.client.remove_lun_from_lungroup(lun_group_ids[0], lun_id)
|
||||
@ -458,7 +369,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
smart_qos = smartx.SmartQos(self.client)
|
||||
smart_qos.remove(qos_id, lun_id)
|
||||
|
||||
metadata = huawei_utils.get_lun_metadata(volume)
|
||||
metadata = huawei_utils.get_volume_private_data(volume)
|
||||
if metadata.get('hypermetro_id'):
|
||||
metro = hypermetro.HuaweiHyperMetro(self.client,
|
||||
self.rmt_client,
|
||||
@ -576,9 +487,15 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
def update_migrated_volume(self, ctxt, volume, new_volume,
|
||||
original_volume_status=None):
|
||||
orig_lun_name = huawei_utils.encode_name(volume.id)
|
||||
new_lun_id, lun_wwn = huawei_utils.get_volume_lun_id(
|
||||
new_lun_info = huawei_utils.get_lun_info(
|
||||
self.client, new_volume)
|
||||
new_metadata = huawei_utils.get_lun_metadata(new_volume)
|
||||
if not new_lun_info:
|
||||
msg = _("Volume %s doesn't exist.") % volume.id
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
|
||||
new_lun_id = new_lun_info['ID']
|
||||
new_metadata = huawei_utils.get_volume_private_data(new_volume)
|
||||
model_update = {
|
||||
'provider_location': huawei_utils.to_string(**new_metadata),
|
||||
}
|
||||
@ -596,7 +513,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
|
||||
return model_update
|
||||
|
||||
def migrate_volume(self, ctxt, volume, host, new_type=None):
|
||||
def migrate_volume(self, ctxt, volume, host):
|
||||
"""Migrate a volume within the same array."""
|
||||
self._check_volume_exist_on_array(volume,
|
||||
constants.VOLUME_NOT_EXISTS_RAISE)
|
||||
@ -604,12 +521,11 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
# NOTE(jlc): Replication volume can't migrate. But retype
|
||||
# can remove replication relationship first then do migrate.
|
||||
# So don't add this judgement into _check_migration_valid().
|
||||
volume_type = self._get_volume_type(volume)
|
||||
opts = self._get_volume_params(volume_type)
|
||||
if opts.get('replication_enabled') == 'true':
|
||||
opts = huawei_utils.get_volume_params(volume)
|
||||
if opts.get('replication_enabled'):
|
||||
return (False, None)
|
||||
|
||||
return self._migrate_volume(volume, host, new_type)
|
||||
return self._migrate_volume(volume, host)
|
||||
|
||||
def _check_migration_valid(self, host, volume):
|
||||
if 'pool_name' not in host['capabilities']:
|
||||
@ -637,35 +553,24 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
if not self._check_migration_valid(host, volume):
|
||||
return (False, None)
|
||||
|
||||
type_id = volume.volume_type_id
|
||||
|
||||
volume_type = None
|
||||
if type_id:
|
||||
volume_type = volume_types.get_volume_type(None, type_id)
|
||||
|
||||
pool_name = host['capabilities']['pool_name']
|
||||
pools = self.client.get_all_pools()
|
||||
pool_info = self.client.get_pool_info(pool_name, pools)
|
||||
dst_volume_name = six.text_type(uuid.uuid4())
|
||||
|
||||
src_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume)
|
||||
opts = None
|
||||
qos = None
|
||||
lun_info = huawei_utils.get_lun_info(self.client, volume)
|
||||
if not lun_info:
|
||||
msg = _("Volume %s doesn't exist.") % volume.id
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
|
||||
src_id = lun_info['ID']
|
||||
|
||||
if new_type:
|
||||
# If new type exists, use new type.
|
||||
new_specs = new_type['extra_specs']
|
||||
opts = self._get_volume_params_from_specs(new_specs)
|
||||
if 'LUNType' not in opts:
|
||||
opts['LUNType'] = self.configuration.lun_type
|
||||
|
||||
qos = smartx.SmartQos.get_qos_by_volume_type(new_type)
|
||||
elif volume_type:
|
||||
qos = smartx.SmartQos.get_qos_by_volume_type(volume_type)
|
||||
|
||||
if not opts:
|
||||
opts = self._get_volume_params(volume_type)
|
||||
|
||||
lun_info = self.client.get_lun_info(src_id)
|
||||
opts = huawei_utils.get_volume_type_params(new_type)
|
||||
else:
|
||||
opts = huawei_utils.get_volume_params(volume)
|
||||
|
||||
if opts['policy']:
|
||||
policy = opts['policy']
|
||||
@ -698,15 +603,15 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
lun_info = self.client.create_lun(lun_params)
|
||||
lun_id = lun_info['ID']
|
||||
|
||||
if qos:
|
||||
LOG.info('QoS: %s.', qos)
|
||||
if opts.get('qos'):
|
||||
SmartQos = smartx.SmartQos(self.client)
|
||||
SmartQos.add(qos, lun_id)
|
||||
if opts:
|
||||
SmartQos.add(opts['qos'], lun_id)
|
||||
if opts.get('smartpartition'):
|
||||
smartpartition = smartx.SmartPartition(self.client)
|
||||
smartpartition.add(opts, lun_id)
|
||||
smartpartition.add(opts['partitionname'], lun_id)
|
||||
if opts.get('smartcache'):
|
||||
smartcache = smartx.SmartCache(self.client)
|
||||
smartcache.add(opts, lun_id)
|
||||
smartcache.add(opts['cachename'], lun_id)
|
||||
|
||||
dst_id = lun_info['ID']
|
||||
self._wait_volume_ready(dst_id)
|
||||
@ -720,24 +625,24 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
We use LUNcopy to copy a new volume from snapshot.
|
||||
The time needed increases as volume size does.
|
||||
"""
|
||||
volume_type = self._get_volume_type(volume)
|
||||
opts = self._get_volume_params(volume_type)
|
||||
if (opts.get('hypermetro') == 'true'
|
||||
and opts.get('replication_enabled') == 'true'):
|
||||
opts = huawei_utils.get_volume_params(volume)
|
||||
if opts.get('hypermetro') and opts.get('replication_enabled'):
|
||||
msg = _("Hypermetro and Replication can not be "
|
||||
"used in the same volume_type.")
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
|
||||
snapshot_id = huawei_utils.get_snapshot_id(self.client, snapshot)
|
||||
if snapshot_id is None:
|
||||
snapshot_info = huawei_utils.get_snapshot_info(self.client, snapshot)
|
||||
if not snapshot_info:
|
||||
msg = _('create_volume_from_snapshot: Snapshot %(name)s '
|
||||
'does not exist.') % {'name': snapshot.id}
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
|
||||
lun_params, lun_info, model_update = (
|
||||
self._create_base_type_volume(opts, volume, volume_type))
|
||||
snapshot_id = snapshot_info['ID']
|
||||
|
||||
lun_params, lun_info, model_update = self._create_base_type_volume(
|
||||
opts, volume)
|
||||
|
||||
tgt_lun_id = lun_info['ID']
|
||||
luncopy_name = huawei_utils.encode_name(volume.id)
|
||||
@ -810,8 +715,8 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
If the volume exists on the array, return the LUN ID.
|
||||
If not exists, raise or log warning.
|
||||
"""
|
||||
lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume)
|
||||
if not lun_id:
|
||||
lun_info = huawei_utils.get_lun_info(self.client, volume)
|
||||
if not lun_info:
msg = _("Volume %s does not exist on the array.") % volume.id
if action == constants.VOLUME_NOT_EXISTS_WARN:
LOG.warning(msg)
@ -819,27 +724,15 @@ class HuaweiBaseDriver(driver.VolumeDriver):
raise exception.VolumeBackendAPIException(data=msg)
return

if not lun_wwn:
LOG.debug("No LUN WWN recorded for volume %s.", volume.id)

if not self.client.check_lun_exist(lun_id, lun_wwn):
msg = (_("Volume %s does not exist on the array.")
% volume.id)
if action == constants.VOLUME_NOT_EXISTS_WARN:
LOG.warning(msg)
if action == constants.VOLUME_NOT_EXISTS_RAISE:
raise exception.VolumeBackendAPIException(data=msg)
return
return lun_id
return lun_info['ID']

def extend_volume(self, volume, new_size):
"""Extend a volume."""
lun_id = self._check_volume_exist_on_array(
volume, constants.VOLUME_NOT_EXISTS_RAISE)

volume_type = self._get_volume_type(volume)
opts = self._get_volume_params(volume_type)
if opts.get('replication_enabled') == 'true':
opts = huawei_utils.get_volume_params(volume)
if opts.get('replication_enabled'):
msg = (_("Can't extend replication volume, volume: %(id)s") %
{"id": volume.id})
LOG.error(msg)
@ -875,17 +768,16 @@ class HuaweiBaseDriver(driver.VolumeDriver):
self.client.extend_lun(lun_id, new_size)

def _create_snapshot_base(self, snapshot):
volume = snapshot.volume
if not volume:
msg = _("Can't get volume id from snapshot, snapshot: %(id)s"
) % {'id': snapshot.id}
lun_info = huawei_utils.get_lun_info(self.client, snapshot.volume)
if not lun_info:
msg = _("Parent volume of snapshot %s doesn't exist."
) % snapshot.id
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)

lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume)
snapshot_name = huawei_utils.encode_name(snapshot.id)
snapshot_description = snapshot.id
snapshot_info = self.client.create_snapshot(lun_id,
snapshot_info = self.client.create_snapshot(lun_info['ID'],
snapshot_name,
snapshot_description)
snapshot_id = snapshot_info['ID']
@ -910,10 +802,10 @@ class HuaweiBaseDriver(driver.VolumeDriver):
def delete_snapshot(self, snapshot):
LOG.info('Delete snapshot %s.', snapshot.id)

snapshot_id = huawei_utils.get_snapshot_id(self.client, snapshot)
if snapshot_id and self.client.check_snapshot_exist(snapshot_id):
self.client.stop_snapshot(snapshot_id)
self.client.delete_snapshot(snapshot_id)
snapshot_info = huawei_utils.get_snapshot_info(self.client, snapshot)
if snapshot_info:
self.client.stop_snapshot(snapshot_info['ID'])
self.client.delete_snapshot(snapshot_info['ID'])
else:
LOG.warning("Can't find snapshot on the array.")

@ -934,7 +826,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
model_update = {}
replica_enabled_change = change_opts.get('replication_enabled')
replica_type_change = change_opts.get('replication_type')
if replica_enabled_change and replica_enabled_change[0] == 'true':
if replica_enabled_change and replica_enabled_change[0]:
try:
self.replica.delete_replica(volume)
model_update.update({'replication_status': 'disabled',
@ -960,7 +852,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
LOG.exception('Retype volume error.')
return False

if replica_enabled_change and replica_enabled_change[1] == 'true':
if replica_enabled_change and replica_enabled_change[1]:
try:
# If replica_enabled_change is not None, the
# replica_type_change won't be None. See function
@ -1081,15 +973,13 @@ class HuaweiBaseDriver(driver.VolumeDriver):
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)

new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type)
if not self.support_func.get('QoS_support'):
if new_qos:
if new_opts['qos']:
if not self.support_func.get('QoS_support'):
msg = (_("Can't support qos on the array."))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)

def _check_needed_changes(self, lun_id, old_opts, new_opts,
change_opts, new_type):
def _check_needed_changes(self, lun_id, old_opts, new_opts, change_opts):
new_cache_id = None
new_cache_name = new_opts['cachename']
if new_cache_name:
@ -1155,7 +1045,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
new_partition_name])

# smartqos
new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type)
new_qos = new_opts.get('qos')
if not self.support_func.get('QoS_support'):
if new_qos:
msg = (_("Can't support qos on the array."))
@ -1182,11 +1072,16 @@ class HuaweiBaseDriver(driver.VolumeDriver):
'replication_type': None,
}

lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume)
lun_info = huawei_utils.get_lun_info(self.client, volume)
if not lun_info:
msg = _("Volume %s doesn't exist.") % volume.id
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)

lun_id = lun_info['ID']
old_opts = self.get_lun_specs(lun_id)

new_specs = new_type['extra_specs']
new_opts = self._get_volume_params_from_specs(new_specs)
new_opts = huawei_utils.get_volume_type_params(new_type)

if 'LUNType' not in new_opts:
new_opts['LUNType'] = self.configuration.lun_type
@ -1198,10 +1093,9 @@ class HuaweiBaseDriver(driver.VolumeDriver):
migration = True
change_opts['LUNType'] = (old_opts['LUNType'], new_opts['LUNType'])

volume_type = self._get_volume_type(volume)
volume_opts = self._get_volume_params(volume_type)
if (volume_opts['replication_enabled'] == 'true'
or new_opts['replication_enabled'] == 'true'):
volume_opts = huawei_utils.get_volume_params(volume)
if (volume_opts['replication_enabled'] or
new_opts['replication_enabled']):
# If replication_enabled changes,
# then replication_type in change_opts will be set.
change_opts['replication_enabled'] = (
@ -1212,7 +1106,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
new_opts['replication_type'])

change_opts = self._check_needed_changes(lun_id, old_opts, new_opts,
change_opts, new_type)
change_opts)

LOG.debug("Determine changes when retype. Migration: "
"%(migration)s, change_opts: %(change_opts)s.",
@ -1434,8 +1328,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
# Handle volume type if specified.
old_opts = self.get_lun_specs(lun_id)
volume_type = volume_types.get_volume_type(None, type_id)
new_specs = volume_type.get('extra_specs')
new_opts = self._get_volume_params_from_specs(new_specs)
new_opts = huawei_utils.get_volume_type_params(volume_type)
if ('LUNType' in new_opts and
old_opts['LUNType'] != new_opts['LUNType']):
msg = (_("Can't import LUN %(lun_id)s to Cinder. "
@ -1447,9 +1340,8 @@ class HuaweiBaseDriver(driver.VolumeDriver):
change_opts = {'policy': None, 'partitionid': None,
'cacheid': None, 'qos': None}

change_opts = self._check_needed_changes(lun_id, old_opts,
new_opts, change_opts,
volume_type)
change_opts = self._check_needed_changes(
lun_id, old_opts, new_opts, change_opts)
self.modify_lun(lun_id, change_opts)

# Rename the LUN to make it manageable for Cinder.
@ -1502,7 +1394,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
def manage_existing_get_size(self, volume, external_ref):
"""Get the size of the existing volume."""
lun_info = self._get_lun_info_by_ref(external_ref)
size = int(math.ceil(lun_info.get('CAPACITY') /
size = int(math.ceil(float(lun_info.get('CAPACITY')) /
constants.CAPACITY_UNIT))
return size
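The float() casts above guard against CAPACITY and USERCAPACITY coming back from the REST API as strings of 512-byte sectors; CAPACITY_UNIT (changed to 1024 * 1024 * 2 in the constants hunk further down) is the number of sectors per GiB. A minimal, self-contained sketch of the conversion, with a made-up capacity value:

import math

CAPACITY_UNIT = 1024 * 1024 * 2  # 512-byte sectors per GiB

def sectors_to_gb(capacity):
    # Cast the string to float before dividing, then round partial GiBs up,
    # the same way manage_existing_get_size() does after this change.
    return int(math.ceil(float(capacity) / CAPACITY_UNIT))

print(sectors_to_gb('2097152'))  # -> 1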

@ -1546,9 +1438,10 @@ class HuaweiBaseDriver(driver.VolumeDriver):
snapshot_info = self._get_snapshot_info_by_ref(existing_ref)
snapshot_id = snapshot_info.get('ID')

parent_lun_id, lun_wwn = huawei_utils.get_volume_lun_id(
parent_lun_info = huawei_utils.get_lun_info(
self.client, snapshot.volume)
if parent_lun_id != snapshot_info.get('PARENTID'):
if (not parent_lun_info or
parent_lun_info['ID'] != snapshot_info.get('PARENTID')):
msg = (_("Can't import snapshot %s to Cinder. "
"Snapshot doesn't belong to volume."), snapshot_id)
raise exception.ManageExistingInvalidReference(
@ -1574,7 +1467,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
"""Get the size of the existing snapshot."""
snapshot_info = self._get_snapshot_info_by_ref(existing_ref)
size = int(math.ceil(snapshot_info.get('USERCAPACITY') /
size = int(math.ceil(float(snapshot_info.get('USERCAPACITY')) /
constants.CAPACITY_UNIT))
return size

@ -1594,8 +1487,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
def _get_group_type(self, group):
opts = []
for vol_type in group.volume_types:
specs = vol_type.extra_specs
opts.append(self._get_volume_params_from_specs(specs))
opts.append(huawei_utils.get_volume_type_params(vol_type))

return opts

@ -1818,9 +1710,8 @@ class HuaweiBaseDriver(driver.VolumeDriver):
replica_volumes = []

for v in volumes:
volume_type = self._get_volume_type(v)
opts = self._get_volume_params(volume_type)
if opts.get('replication_enabled') == 'true':
opts = huawei_utils.get_volume_params(v)
if opts.get('replication_enabled'):
replica_volumes.append(v)
else:
normal_volumes.append(v)
@ -1938,7 +1829,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):

def get_lun_id_and_type(self, volume):
if hasattr(volume, 'lun_type'):
metadata = huawei_utils.get_snapshot_metadata(volume)
metadata = huawei_utils.get_snapshot_private_data(volume)
lun_id = metadata['huawei_snapshot_id']
lun_type = constants.SNAPSHOT_TYPE
else:

@ -19,7 +19,7 @@ STATUS_RUNNING = '10'
STATUS_VOLUME_READY = '27'
STATUS_LUNCOPY_READY = '40'
STATUS_QOS_ACTIVE = '2'
STATUS_QOS_INACTIVE = '45'
QOS_INACTIVATED = '45'
LUN_TYPE = '11'
SNAPSHOT_TYPE = '27'

@ -36,7 +36,7 @@ ARRAY_VERSION = 'V300R003C00'
FC_PORT_CONNECTED = '10'
FC_INIT_ONLINE = '27'
FC_PORT_MODE_FABRIC = '0'
CAPACITY_UNIT = 1024.0 * 1024.0 * 2
CAPACITY_UNIT = 1024 * 1024 * 2
DEFAULT_WAIT_TIMEOUT = 3600 * 24 * 30
DEFAULT_WAIT_INTERVAL = 5


@ -331,7 +331,7 @@ class HuaweiFCDriver(common.HuaweiBaseDriver, driver.FibreChannelDriver):
# Add host into hostgroup.
hostgroup_id = self.client.add_host_to_hostgroup(host_id)

metadata = huawei_utils.get_lun_metadata(volume)
metadata = huawei_utils.get_volume_private_data(volume)
LOG.info("initialize_connection, metadata is: %s.", metadata)
hypermetro_lun = metadata.get('hypermetro_id') is not None

@ -488,7 +488,7 @@ class HuaweiFCDriver(common.HuaweiBaseDriver, driver.FibreChannelDriver):
self.client.delete_mapping_view(view_id)

# Deal with hypermetro connection.
metadata = huawei_utils.get_lun_metadata(volume)
metadata = huawei_utils.get_volume_private_data(volume)
LOG.info("Detach Volume, metadata is: %s.", metadata)

if metadata.get('hypermetro_id'):

@ -17,29 +17,33 @@ import hashlib
import json

from oslo_log import log as logging
from oslo_utils import units
from oslo_utils import strutils
import retrying
import six

from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder.volume.drivers.huawei import constants
from cinder.volume import utils
from cinder.volume import qos_specs
from cinder.volume import volume_types


LOG = logging.getLogger(__name__)


def encode_name(id):
encoded_name = hashlib.md5(id.encode('utf-8')).hexdigest()
prefix = id.split('-')[0] + '-'
def encode_name(name):
encoded_name = hashlib.md5(name.encode('utf-8')).hexdigest()
prefix = name.split('-')[0] + '-'
postfix = encoded_name[:constants.MAX_NAME_LENGTH - len(prefix)]
return prefix + postfix


def old_encode_name(id):
pre_name = id.split("-")[0]
vol_encoded = six.text_type(hash(id))
def old_encode_name(name):
pre_name = name.split("-")[0]
vol_encoded = six.text_type(hash(name))
if vol_encoded.startswith('-'):
newuuid = pre_name + vol_encoded
else:
@ -61,101 +65,287 @@ def old_encode_host_name(name):


def wait_for_condition(func, interval, timeout):
def _retry_on_result(result):
return not result

r = retrying.Retrying(retry_on_result=lambda result: not result,
retry_on_exception=lambda result: False,
def _retry_on_exception(result):
return False

r = retrying.Retrying(retry_on_result=_retry_on_result,
retry_on_exception=_retry_on_exception,
wait_fixed=interval * 1000,
stop_max_delay=timeout * 1000)
try:
r.call(func)
except retrying.RetryError:
msg = _('wait_for_condition: %s timed out.') % func.__name__
r.call(func)
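The refactored wait_for_condition keeps the retrying-based behaviour (retry while the callable returns a falsy value, never retry on exceptions, poll every interval seconds, stop after timeout seconds) but drops the try/except translation of retrying.RetryError, so a timeout now propagates as retrying.RetryError to the caller. A standalone usage sketch with an invented condition:

import time

import retrying


def wait_for_condition(func, interval, timeout):
    # Same shape as the helper above: retry while func() is falsy, never
    # retry on exceptions, poll every `interval` s, give up after `timeout` s.
    def _retry_on_result(result):
        return not result

    def _retry_on_exception(result):
        return False

    r = retrying.Retrying(retry_on_result=_retry_on_result,
                          retry_on_exception=_retry_on_exception,
                          wait_fixed=interval * 1000,
                          stop_max_delay=timeout * 1000)
    r.call(func)


deadline = time.time() + 2
wait_for_condition(lambda: time.time() >= deadline, interval=1, timeout=10)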


def _get_volume_type(volume):
if volume.volume_type:
return volume.volume_type
if volume.volume_type_id:
return volume_types.get_volume_type(None, volume.volume_type_id)


def get_volume_params(volume):
volume_type = _get_volume_type(volume)
return get_volume_type_params(volume_type)


def get_volume_type_params(volume_type):
specs = {}
if isinstance(volume_type, dict) and volume_type.get('extra_specs'):
specs = volume_type['extra_specs']
elif isinstance(volume_type, objects.VolumeType
) and volume_type.extra_specs:
specs = volume_type.extra_specs

vol_params = get_volume_params_from_specs(specs)
vol_params['qos'] = None

if isinstance(volume_type, dict) and volume_type.get('qos_specs_id'):
vol_params['qos'] = _get_qos_specs(volume_type['qos_specs_id'])
elif isinstance(volume_type, objects.VolumeType
) and volume_type.qos_specs_id:
vol_params['qos'] = _get_qos_specs(volume_type.qos_specs_id)

LOG.info('volume opts %s.', vol_params)
return vol_params


def get_volume_params_from_specs(specs):
opts = _get_opts_from_specs(specs)

_verify_smartcache_opts(opts)
_verify_smartpartition_opts(opts)
_verify_smartthin_opts(opts)

return opts


def _get_opts_from_specs(specs):
"""Get the well defined extra specs."""
opts = {}

def _get_bool_param(k, v):
words = v.split()
if len(words) == 2 and words[0] == '<is>':
return strutils.bool_from_string(words[1], strict=True)

msg = _("%(k)s spec must be specified as %(k)s='<is> True' "
"or '<is> False'.") % {'k': k}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
raise exception.InvalidInput(reason=msg)

def _get_replication_type_param(k, v):
words = v.split()
if len(words) == 2 and words[0] == '<in>':
REPLICA_SYNC_TYPES = {'sync': constants.REPLICA_SYNC_MODEL,
'async': constants.REPLICA_ASYNC_MODEL}
sync_type = words[1].lower()
if sync_type in REPLICA_SYNC_TYPES:
return REPLICA_SYNC_TYPES[sync_type]

msg = _("replication_type spec must be specified as "
"replication_type='<in> sync' or '<in> async'.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)

def _get_string_param(k, v):
if not v:
msg = _("%s spec must be specified as a string.") % k
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
return v

opts_capability = {
'capabilities:smarttier': (_get_bool_param, False),
'capabilities:smartcache': (_get_bool_param, False),
'capabilities:smartpartition': (_get_bool_param, False),
'capabilities:thin_provisioning_support': (_get_bool_param, False),
'capabilities:thick_provisioning_support': (_get_bool_param, False),
'capabilities:hypermetro': (_get_bool_param, False),
'capabilities:replication_enabled': (_get_bool_param, False),
'replication_type': (_get_replication_type_param,
constants.REPLICA_ASYNC_MODEL),
'smarttier:policy': (_get_string_param, None),
'smartcache:cachename': (_get_string_param, None),
'smartpartition:partitionname': (_get_string_param, None),
'huawei_controller:controllername': (_get_string_param, None),
'capabilities:dedup': (_get_bool_param, None),
'capabilities:compression': (_get_bool_param, None),
}

def _get_opt_key(spec_key):
key_split = spec_key.split(':')
if len(key_split) == 1:
return key_split[0]
else:
return key_split[1]

for spec_key in opts_capability:
opt_key = _get_opt_key(spec_key)
opts[opt_key] = opts_capability[spec_key][1]

for key, value in six.iteritems(specs):
if key not in opts_capability:
continue
func = opts_capability[key][0]
opt_key = _get_opt_key(key)
opts[opt_key] = func(key, value)

return opts
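The parsing helpers above follow the scheduler-style extra-specs syntax: booleans are written as '<is> True' / '<is> False', replication_type as '<in> sync' / '<in> async', and each spec key is reduced to the part after the colon, so 'capabilities:hypermetro' ends up as the 'hypermetro' opt. A small sketch of the boolean convention, with example spec values assumed:

from oslo_utils import strutils


def parse_is_bool(key, value):
    # Same '<is> True' / '<is> False' convention _get_bool_param() expects.
    words = value.split()
    if len(words) == 2 and words[0] == '<is>':
        return strutils.bool_from_string(words[1], strict=True)
    raise ValueError("%s must be '<is> True' or '<is> False'" % key)


spec_key = 'capabilities:hypermetro'
print(spec_key.split(':')[-1])               # 'hypermetro'
print(parse_is_bool(spec_key, '<is> True'))  # True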


def get_volume_size(volume):
"""Calculate the volume size.
def _get_qos_specs(qos_specs_id):
ctxt = context.get_admin_context()
specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)
if specs is None:
return {}

We should divide the given volume size by 512 for the 18000 system
calculates volume size with sectors, which is 512 bytes.
"""
volume_size = units.Gi / 512 # 1G
if int(volume.size) != 0:
volume_size = int(volume.size) * units.Gi / 512
if specs.get('consumer') == 'front-end':
return {}

return volume_size
kvs = specs.get('specs', {})
LOG.info('The QoS specs is: %s.', kvs)

qos = {'IOTYPE': kvs.pop('IOType', None)}

if qos['IOTYPE'] not in constants.QOS_IOTYPES:
msg = _('IOType must be in %(types)s.'
) % {'types': constants.QOS_IOTYPES}
LOG.error(msg)
raise exception.InvalidInput(reason=msg)

for k, v in kvs.items():
if k not in constants.QOS_SPEC_KEYS:
msg = _('QoS key %s is not valid.') % k
LOG.error(msg)
raise exception.InvalidInput(reason=msg)

if int(v) <= 0:
msg = _('QoS value for %s must > 0.') % k
LOG.error(msg)
raise exception.InvalidInput(reason=msg)

qos[k.upper()] = v

if len(qos) < 2:
msg = _('QoS policy must specify both IOType and one another '
'qos spec, got policy: %s.') % qos
LOG.error(msg)
raise exception.InvalidInput(reason=msg)

qos_keys = set(qos.keys())
if (qos_keys & set(constants.UPPER_LIMIT_KEYS) and
qos_keys & set(constants.LOWER_LIMIT_KEYS)):
msg = _('QoS policy upper limit and lower limit '
'conflict, QoS policy: %s.') % qos
LOG.error(msg)
raise exception.InvalidInput(reason=msg)

return qos


def get_volume_metadata(volume):
if type(volume) is objects.Volume:
return volume.metadata

if 'volume_metadata' in volume:
metadata = volume.get('volume_metadata')
return {item['key']: item['value'] for item in metadata}

return {}
def _verify_smartthin_opts(opts):
if (opts['thin_provisioning_support'] and
opts['thick_provisioning_support']):
msg = _('Cannot set thin and thick at the same time.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
elif opts['thin_provisioning_support']:
opts['LUNType'] = constants.THIN_LUNTYPE
elif opts['thick_provisioning_support']:
opts['LUNType'] = constants.THICK_LUNTYPE


def get_admin_metadata(volume):
admin_metadata = {}
if 'admin_metadata' in volume:
admin_metadata = volume.admin_metadata
elif 'volume_admin_metadata' in volume:
metadata = volume.get('volume_admin_metadata', [])
admin_metadata = {item['key']: item['value'] for item in metadata}

LOG.debug("Volume ID: %(id)s, admin_metadata: %(admin_metadata)s.",
{"id": volume.id, "admin_metadata": admin_metadata})
return admin_metadata
def _verify_smartcache_opts(opts):
if opts['smartcache'] and not opts['cachename']:
msg = _('Cache name is not specified, please set '
'smartcache:cachename in extra specs.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)


def get_snapshot_metadata_value(snapshot):
if type(snapshot) is objects.Snapshot:
return snapshot.metadata

if 'snapshot_metadata' in snapshot:
metadata = snapshot.snapshot_metadata
return {item['key']: item['value'] for item in metadata}

return {}
def _verify_smartpartition_opts(opts):
if opts['smartpartition'] and not opts['partitionname']:
msg = _('Partition name is not specified, please set '
'smartpartition:partitionname in extra specs.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)


def check_whether_operate_consistency_group(func):
def wrapper(self, context, group, *args, **kwargs):
if not utils.is_group_a_cg_snapshot_type(group):
msg = _("%s, the group or group snapshot is not cg or "
"cg_snapshot") % func.__name__
LOG.debug(msg)
raise NotImplementedError(msg)
return func(self, context, group, *args, **kwargs)
return wrapper
def wait_lun_online(client, lun_id, wait_interval=None, wait_timeout=None):
def _lun_online():
result = client.get_lun_info_by_id(lun_id)
if result['HEALTHSTATUS'] != constants.STATUS_HEALTH:
err_msg = _('LUN %s is abnormal.') % lun_id
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)

if result['RUNNINGSTATUS'] == constants.LUN_INITIALIZING:
return False

return True

if not wait_interval:
wait_interval = constants.DEFAULT_WAIT_INTERVAL
if not wait_timeout:
wait_timeout = wait_interval * 10

wait_for_condition(_lun_online, wait_interval, wait_timeout)


def is_not_exist_exc(exc):
msg = getattr(exc, 'msg', '')
return 'not exist' in msg


def to_string(**kwargs):
return json.dumps(kwargs) if kwargs else ''


def get_lun_metadata(volume):
def to_dict(text):
return json.loads(text) if text else {}


def get_volume_private_data(volume):
if not volume.provider_location:
return {}

info = json.loads(volume.provider_location)
try:
info = json.loads(volume.provider_location)
except Exception:
LOG.exception("Decode volume provider_location error")
return {}

if isinstance(info, dict):
return info

# To keep compatible with old driver version
admin_metadata = get_admin_metadata(volume)
metadata = get_volume_metadata(volume)
return {'huawei_lun_id': six.text_type(info),
'huawei_lun_wwn': admin_metadata.get('huawei_lun_wwn'),
'hypermetro_id': metadata.get('hypermetro_id'),
'remote_lun_id': metadata.get('remote_lun_id')
'huawei_lun_wwn': volume.admin_metadata.get('huawei_lun_wwn'),
'huawei_sn': volume.metadata.get('huawei_sn'),
'hypermetro_id': volume.metadata.get('hypermetro_id'),
'remote_lun_id': volume.metadata.get('remote_lun_id')
}
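get_volume_private_data above treats volume.provider_location as a JSON dict written by to_string(); a bare value left by an older driver version is not a dict, so the function falls back to rebuilding the data from the volume's metadata and admin metadata. A rough sketch of the round trip, with made-up IDs:

import json


def to_string(**kwargs):
    return json.dumps(kwargs) if kwargs else ''


# New-style provider_location written by the refactored driver.
provider_location = to_string(huawei_lun_id='11',
                              huawei_lun_wwn='6643e8c1004c5f67')
print(json.loads(provider_location)['huawei_lun_id'])  # '11'

# Old-style provider_location held just the LUN id; it still parses, but not
# to a dict, which is what triggers the compatibility branch above.
print(isinstance(json.loads('11'), dict))  # False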


def get_snapshot_metadata(snapshot):
def get_volume_metadata(volume):
if isinstance(volume, objects.Volume):
return volume.metadata
if volume.get('volume_metadata'):
return {item['key']: item['value'] for item in
volume['volume_metadata']}
return {}


def get_replication_data(volume):
if not volume.replication_driver_data:
return {}

return json.loads(volume.replication_driver_data)


def get_snapshot_private_data(snapshot):
if not snapshot.provider_location:
return {}

@ -164,41 +354,66 @@ def get_snapshot_metadata(snapshot):
return info

# To keep compatible with old driver version
return {'huawei_snapshot_id': six.text_type(info)}
return {'huawei_snapshot_id': six.text_type(info),
'huawei_snapshot_wwn': snapshot.metadata.get(
'huawei_snapshot_wwn'),
}


def get_volume_lun_id(client, volume):
metadata = get_lun_metadata(volume)
lun_id = metadata.get('huawei_lun_id')
def get_external_lun_info(client, external_ref):
lun_info = None
if 'source-id' in external_ref:
lun = client.get_lun_info_by_id(external_ref['source-id'])
lun_info = client.get_lun_info_by_name(lun['NAME'])
elif 'source-name' in external_ref:
lun_info = client.get_lun_info_by_name(external_ref['source-name'])

# First try the new encoded way.
if not lun_id:
volume_name = encode_name(volume.id)
lun_id = client.get_lun_id_by_name(volume_name)
return lun_info


def get_external_snapshot_info(client, external_ref):
snapshot_info = None
if 'source-id' in external_ref:
snapshot_info = client.get_snapshot_info_by_id(
external_ref['source-id'])
elif 'source-name' in external_ref:
snapshot_info = client.get_snapshot_info_by_name(
external_ref['source-name'])

return snapshot_info


def get_lun_info(client, volume):
metadata = get_volume_private_data(volume)

volume_name = encode_name(volume.id)
lun_info = client.get_lun_info_by_name(volume_name)

# If new encoded way not found, try the old encoded way.
if not lun_id:
if not lun_info:
volume_name = old_encode_name(volume.id)
lun_id = client.get_lun_id_by_name(volume_name)
lun_info = client.get_lun_info_by_name(volume_name)

return lun_id, metadata.get('huawei_lun_wwn')
if not lun_info and metadata.get('huawei_lun_id'):
lun_info = client.get_lun_info_by_id(metadata['huawei_lun_id'])

if lun_info and ('huawei_lun_wwn' in metadata and
lun_info.get('WWN') != metadata['huawei_lun_wwn']):
return None

return lun_info


def get_snapshot_id(client, snapshot):
metadata = get_snapshot_metadata(snapshot)
snapshot_id = metadata.get('huawei_snapshot_id')

# First try the new encoded way.
if not snapshot_id:
name = encode_name(snapshot.id)
snapshot_id = client.get_snapshot_id_by_name(name)
def get_snapshot_info(client, snapshot):
name = encode_name(snapshot.id)
snapshot_info = client.get_snapshot_info_by_name(name)

# If new encoded way not found, try the old encoded way.
if not snapshot_id:
if not snapshot_info:
name = old_encode_name(snapshot.id)
snapshot_id = client.get_snapshot_id_by_name(name)
snapshot_info = client.get_snapshot_info_by_name(name)

return snapshot_id
return snapshot_info


def get_host_id(client, host_name):
@ -212,3 +427,57 @@ def get_host_id(client, host_name):
host_id = client.get_host_id_by_name(encoded_name)

return host_id


def get_hypermetro_group(client, group_id):
encoded_name = encode_name(group_id)
group = client.get_metrogroup_by_name(encoded_name)
if not group:
encoded_name = old_encode_name(group_id)
group = client.get_metrogroup_by_name(encoded_name)
return group


def get_replication_group(client, group_id):
encoded_name = encode_name(group_id)
group = client.get_replication_group_by_name(encoded_name)
if not group:
encoded_name = old_encode_name(group_id)
group = client.get_replication_group_by_name(encoded_name)
return group


def get_volume_model_update(volume, **kwargs):
private_data = get_volume_private_data(volume)

if kwargs.get('hypermetro_id'):
private_data['hypermetro_id'] = kwargs.get('hypermetro_id')
elif 'hypermetro_id' in private_data:
private_data.pop('hypermetro_id')

if 'huawei_lun_id' in kwargs:
private_data['huawei_lun_id'] = kwargs['huawei_lun_id']
if 'huawei_lun_wwn' in kwargs:
private_data['huawei_lun_wwn'] = kwargs['huawei_lun_wwn']
if 'huawei_sn' in kwargs:
private_data['huawei_sn'] = kwargs['huawei_sn']

model_update = {'provider_location': to_string(**private_data)}

if kwargs.get('replication_id'):
model_update['replication_driver_data'] = to_string(
pair_id=kwargs.get('replication_id'))
model_update['replication_status'] = fields.ReplicationStatus.ENABLED
else:
model_update['replication_driver_data'] = None
model_update['replication_status'] = fields.ReplicationStatus.DISABLED

return model_update


def get_group_type_params(group):
opts = []
for volume_type in group.volume_types:
opt = get_volume_type_params(volume_type)
opts.append(opt)
return opts

@ -75,7 +75,7 @@ class HuaweiHyperMetro(object):

def delete_hypermetro(self, volume):
"""Delete hypermetro."""
metadata = huawei_utils.get_lun_metadata(volume)
metadata = huawei_utils.get_volume_private_data(volume)
metro_id = metadata['hypermetro_id']
remote_lun_id = metadata['remote_lun_id']

@ -110,7 +110,7 @@ class HuaweiHyperMetro(object):
{'wwpns': wwns,
'id': volume.id})

metadata = huawei_utils.get_lun_metadata(volume)
metadata = huawei_utils.get_volume_private_data(volume)
lun_id = metadata.get('remote_lun_id')
if lun_id is None:
msg = _("Can't get volume id. Volume name: %s.") % volume.id
@ -179,7 +179,7 @@ class HuaweiHyperMetro(object):
def disconnect_volume_fc(self, volume, connector):
"""Delete map between a volume and a host for FC."""
wwns = connector['wwpns']
metadata = huawei_utils.get_lun_metadata(volume)
metadata = huawei_utils.get_volume_private_data(volume)
lun_id = metadata.get('remote_lun_id')
host_name = connector['host']
left_lunnum = -1
@ -324,7 +324,7 @@ class HuaweiHyperMetro(object):
raise exception.VolumeBackendAPIException(data=msg)

def check_metro_need_to_stop(self, volume):
metadata = huawei_utils.get_lun_metadata(volume)
metadata = huawei_utils.get_volume_private_data(volume)
metro_id = metadata['hypermetro_id']
metro_existed = self.client.check_hypermetro_exist(metro_id)


@ -588,7 +588,7 @@ class ReplicaPairManager(object):
# Switch replication pair role, and start synchronize.
self.rmt_driver.enable(pair_id)

local_metadata = huawei_utils.get_lun_metadata(v)
local_metadata = huawei_utils.get_volume_private_data(v)
new_drv_data = to_string(
{'pair_id': pair_id,
'rmt_lun_id': local_metadata.get('huawei_lun_id'),
@ -630,7 +630,7 @@ class ReplicaPairManager(object):

self.rmt_driver.failover(pair_id)

local_metadata = huawei_utils.get_lun_metadata(v)
local_metadata = huawei_utils.get_volume_private_data(v)
new_drv_data = to_string(
{'pair_id': pair_id,
'rmt_lun_id': local_metadata.get('huawei_lun_id'),

@ -1416,7 +1416,7 @@ class RestClient(object):

return target_iqn

def create_qos_policy(self, qos, lun_id):
def create_qos(self, qos, lun_id):
# Get local time.
localtime = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
# Package QoS name.
@ -1440,8 +1440,7 @@ class RestClient(object):

return result['data']['ID']

def delete_qos_policy(self, qos_id):
"""Delete a QoS policy."""
def delete_qos(self, qos_id):
url = "/ioclass/" + qos_id
data = {"TYPE": "230", "ID": qos_id}

@ -2371,3 +2370,49 @@ class RestClient(object):

if result.get("data"):
return result.get("data").get("COUNT")

def get_lun_info_by_name(self, name):
url = "/lun?filter=NAME::%s" % name
result = self.call(url, None, "GET")

msg = _('Get lun by name %s error.') % name
self._assert_rest_result(result, msg)

if result.get('data'):
return result['data'][0]

def get_lun_info_by_id(self, lun_id):
url = "/lun/" + lun_id
result = self.call(url, None, "GET")

msg = _('Get lun by id %s error.') % lun_id
self._assert_rest_result(result, msg)

return result['data']

def get_snapshot_info_by_name(self, name):
url = "/snapshot?filter=NAME::%s" % name
result = self.call(url, None, "GET")

msg = _('Get snapshot by name %s error.') % name
self._assert_rest_result(result, msg)

if result.get('data'):
return result['data'][0]

def get_snapshot_info_by_id(self, snapshot_id):
url = "/snapshot/" + snapshot_id
result = self.call(url, None, "GET")

msg = _('Get snapshot by id %s error.') % snapshot_id
self._assert_rest_result(result, msg)

return result['data']

def update_qos_luns(self, qos_id, lun_list):
url = "/ioclass/" + qos_id
data = {"LUNLIST": lun_list}
result = self.call(url, data, "PUT")

msg = _('Update luns of qos %s error.') % qos_id
self._assert_rest_result(result, msg)

@ -13,15 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from oslo_utils import excutils
import json

from oslo_log import log as logging

from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume.drivers.huawei import constants
from cinder.volume import qos_specs

LOG = logging.getLogger(__name__)

@ -30,211 +29,116 @@ class SmartQos(object):
def __init__(self, client):
self.client = client

@staticmethod
def get_qos_by_volume_type(volume_type):
# We prefer the qos_specs association
# and override any existing extra-specs settings
# if present.
if not volume_type:
return {}
def _check_qos_consistency(self, policy, qos):
for key in [k.upper() for k in constants.QOS_SPEC_KEYS]:
if qos.get(key, '0') != policy.get(key, '0'):
return False
return True

qos_specs_id = volume_type.get('qos_specs_id')
if not qos_specs_id:
return {}

qos = {}
io_type_flag = None
ctxt = context.get_admin_context()
consumer = qos_specs.get_qos_specs(ctxt, qos_specs_id)['consumer']
if consumer == 'front-end':
return {}

kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
LOG.info('The QoS sepcs is: %s.', kvs)
for k, v in kvs.items():
if k not in constants.HUAWEI_VALID_KEYS:
continue
if k != 'IOType' and int(v) <= 0:
msg = _('QoS config is wrong. %s must > 0.') % k
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if k == 'IOType':
if v not in ['0', '1', '2']:
msg = _('Illegal value specified for IOTYPE: 0, 1, or 2.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
io_type_flag = 1
qos[k.upper()] = v
else:
qos[k.upper()] = v

if not io_type_flag:
msg = (_('QoS policy must specify for IOTYPE: 0, 1, or 2, '
'QoS policy: %(qos_policy)s ') % {'qos_policy': qos})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)

# QoS policy must specify for IOTYPE and another qos_specs.
if len(qos) < 2:
msg = (_('QoS policy must specify for IOTYPE and another '
'qos_specs, QoS policy: %(qos_policy)s.')
% {'qos_policy': qos})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)

for upper_limit in constants.UPPER_LIMIT_KEYS:
for lower_limit in constants.LOWER_LIMIT_KEYS:
if upper_limit in qos and lower_limit in qos:
msg = (_('QoS policy upper_limit and lower_limit '
'conflict, QoS policy: %(qos_policy)s.')
% {'qos_policy': qos})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)

return qos

def _is_high_priority(self, qos):
"""Check QoS priority."""
for key, value in qos.items():
if (key.find('MIN') == 0) or (key.find('LATENCY') == 0):
return True

return False
def _change_lun_priority(self, qos, lun_id):
for key in qos:
if key.startswith('MIN') or key.startswith('LATENCY'):
data = {"IOPRIORITY": "3"}
self.client.update_lun(lun_id, data)
break

@utils.synchronized('huawei_qos', external=True)
def add(self, qos, lun_id):
policy_id = None
self._change_lun_priority(qos, lun_id)
qos_id = self.client.create_qos(qos, lun_id)
try:
# Check QoS priority.
if self._is_high_priority(qos):
self.client.change_lun_priority(lun_id)
# Create QoS policy and activate it.
version = self.client.find_array_version()
if version >= constants.ARRAY_VERSION:
(qos_id, lun_list) = self.client.find_available_qos(qos)
if qos_id:
self.client.add_lun_to_qos(qos_id, lun_id, lun_list)
else:
policy_id = self.client.create_qos_policy(qos, lun_id)
self.client.activate_deactivate_qos(policy_id, True)
else:
policy_id = self.client.create_qos_policy(qos, lun_id)
self.client.activate_deactivate_qos(policy_id, True)
self.client.activate_deactivate_qos(qos_id, True)
except exception.VolumeBackendAPIException:
with excutils.save_and_reraise_exception():
if policy_id is not None:
self.client.delete_qos_policy(policy_id)
self.remove(qos_id, lun_id)
raise

return qos_id

@utils.synchronized('huawei_qos', external=True)
def remove(self, qos_id, lun_id):
qos_info = self.client.get_qos_info(qos_id)
lun_list = self.client.get_lun_list_in_qos(qos_id, qos_info)
if len(lun_list) <= 1:
qos_status = qos_info['RUNNINGSTATUS']
# 2: Active status.
if qos_status != constants.STATUS_QOS_INACTIVE:
def remove(self, qos_id, lun_id, qos_info=None):
if not qos_info:
qos_info = self.client.get_qos_info(qos_id)
lun_list = json.loads(qos_info['LUNLIST'])
if lun_id in lun_list:
lun_list.remove(lun_id)

if len(lun_list) == 0:
if qos_info['RUNNINGSTATUS'] != constants.QOS_INACTIVATED:
self.client.activate_deactivate_qos(qos_id, False)
self.client.delete_qos_policy(qos_id)
self.client.delete_qos(qos_id)
else:
self.client.remove_lun_from_qos(lun_id, lun_list, qos_id)
self.client.update_qos_luns(qos_id, lun_list)

def update(self, qos_id, new_qos, lun_id):
qos_info = self.client.get_qos_info(qos_id)
if self._check_qos_consistency(qos_info, new_qos):
return

self.remove(qos_id, lun_id, qos_info)
self.add(new_qos, lun_id)
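SmartQos.update() above only rebuilds the QoS binding when the requested specs differ from the policy already on the array; _check_qos_consistency compares the upper-cased spec keys and treats a missing key as '0'. A standalone sketch of that comparison (the QOS_SPEC_KEYS tuple is assumed here; the real value lives in constants.py):

# Assumed spec-key list, for illustration only.
QOS_SPEC_KEYS = ('maxIOPS', 'minIOPS', 'minBandWidth',
                 'maxBandWidth', 'latency', 'IOType')


def check_qos_consistency(policy, qos):
    # Mirrors SmartQos._check_qos_consistency(): every known key must match,
    # with '0' standing in for keys absent on either side.
    for key in [k.upper() for k in QOS_SPEC_KEYS]:
        if qos.get(key, '0') != policy.get(key, '0'):
            return False
    return True


print(check_qos_consistency({'MAXIOPS': '100', 'IOTYPE': '2'},
                            {'MAXIOPS': '100', 'IOTYPE': '2'}))  # True
print(check_qos_consistency({'MAXIOPS': '100', 'IOTYPE': '2'},
                            {'MAXIOPS': '200', 'IOTYPE': '2'}))  # False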


class SmartPartition(object):
def __init__(self, client):
self.client = client

def add(self, opts, lun_id):
if opts['smartpartition'] != 'true':
return
if not opts['partitionname']:
raise exception.InvalidInput(
reason=_('Partition name is None, please set '
'smartpartition:partitionname in key.'))

partition_id = self.client.get_partition_id_by_name(
opts['partitionname'])
def add(self, partitionname, lun_id):
partition_id = self.client.get_partition_id_by_name(partitionname)
if not partition_id:
raise exception.InvalidInput(
reason=(_('Can not find partition id by name %(name)s.')
% {'name': opts['partitionname']}))
msg = _('Cannot find partition by name %s.') % partitionname
LOG.error(msg)
raise exception.InvalidInput(reason=msg)

self.client.add_lun_to_partition(lun_id, partition_id)
return partition_id

def remove(self, partition_id, lun_id):
self.client.remove_lun_from_partition(lun_id, partition_id)

def update(self, partition_id, partitionname, lun_id):
partition_info = self.client.get_partition_info_by_id(partition_id)
if partition_info['NAME'] == partitionname:
return

self.remove(partition_id, lun_id)
self.add(partitionname, lun_id)

def check_partition_valid(self, partitionname):
partition_id = self.client.get_partition_id_by_name(partitionname)
if not partition_id:
msg = _("Partition %s doesn't exist.") % partitionname
LOG.error(msg)
raise exception.InvalidInput(reason=msg)


class SmartCache(object):
def __init__(self, client):
self.client = client

def add(self, opts, lun_id):
if opts['smartcache'] != 'true':
return
if not opts['cachename']:
raise exception.InvalidInput(
reason=_('Cache name is None, please set '
'smartcache:cachename in key.'))

cache_id = self.client.get_cache_id_by_name(opts['cachename'])
def add(self, cachename, lun_id):
cache_id = self.client.get_cache_id_by_name(cachename)
if not cache_id:
raise exception.InvalidInput(
reason=(_('Can not find cache id by cache name %(name)s.')
% {'name': opts['cachename']}))
msg = _('Cannot find cache by name %s.') % cachename
LOG.error(msg)
raise exception.InvalidInput(reason=msg)

self.client.add_lun_to_cache(lun_id, cache_id)
return cache_id

def remove(self, cache_id, lun_id):
self.client.remove_lun_from_cache(lun_id, cache_id)

class SmartX(object):
def get_smartx_specs_opts(self, opts):
# Check that smarttier is 0/1/2/3
opts = self.get_smarttier_opts(opts)
opts = self.get_smartthin_opts(opts)
opts = self.get_smartcache_opts(opts)
opts = self.get_smartpartition_opts(opts)
return opts
def update(self, cache_id, cachename, lun_id):
cache_info = self.client.get_cache_info_by_id(cache_id)
if cache_info['NAME'] == cachename:
return

def get_smarttier_opts(self, opts):
if opts['smarttier'] == 'true':
if not opts['policy']:
opts['policy'] = '1'
elif opts['policy'] not in ['0', '1', '2', '3']:
raise exception.InvalidInput(
reason=(_('Illegal value specified for smarttier: '
'set to either 0, 1, 2, or 3.')))
else:
opts['policy'] = '0'
self.remove(cache_id, lun_id)
self.add(cachename, lun_id)

return opts

def get_smartthin_opts(self, opts):
if opts['thin_provisioning_support'] == 'true':
if opts['thick_provisioning_support'] == 'true':
raise exception.InvalidInput(
reason=(_('Illegal value specified for thin: '
'Can not set thin and thick at the same time.')))
else:
opts['LUNType'] = constants.THIN_LUNTYPE
if opts['thick_provisioning_support'] == 'true':
opts['LUNType'] = constants.THICK_LUNTYPE

return opts

def get_smartcache_opts(self, opts):
if opts['smartcache'] == 'true':
if not opts['cachename']:
raise exception.InvalidInput(
reason=_('Cache name is None, please set '
'smartcache:cachename in key.'))
else:
opts['cachename'] = None

return opts

def get_smartpartition_opts(self, opts):
if opts['smartpartition'] == 'true':
if not opts['partitionname']:
raise exception.InvalidInput(
reason=_('Partition name is None, please set '
'smartpartition:partitionname in key.'))
else:
opts['partitionname'] = None

return opts
def check_cache_valid(self, cachename):
cache_id = self.client.get_cache_id_by_name(cachename)
if not cache_id:
msg = _("Cache %s doesn't exit.") % cachename
|
||||
LOG.error(msg)
raise exception.InvalidInput(reason=msg)