Merge "Add SmartX support for Huawei driver"
commit e1e33286ce
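The change wires the SmartX features (SmartQos, SmartPartition, SmartCache and SmartTier) into the Huawei 18000 iSCSI and FC drivers: create_volume now normalizes the volume-type opts through smartx.SmartX() and applies QoS, partition, and cache associations to the new LUN; delete_volume unbinds QoS before deleting the LUN; and rest_client gains the REST calls used for those associations. A minimal sketch (not part of the commit, using a hypothetical opts dict) of how the normalization behaves:

    opts = {'smarttier': 'true', 'policy': '3',
            'smartcache': 'false', 'cachename': None,
            'smartpartition': 'false', 'partitionname': None,
            'thin_provisioning_support': 'true',
            'thick_provisioning_support': False}
    smartx_opts = smartx.SmartX().get_smartx_specs_opts(opts)
    # smartx_opts keeps 'policy' == '3' and sets 'LUNType' == 1 (thin LUN);
    # cachename/partitionname stay None because those features are off.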
@@ -30,6 +30,7 @@ from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import huawei_driver
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import rest_client
from cinder.volume.drivers.huawei import smartx

LOG = logging.getLogger(__name__)

@@ -43,7 +44,7 @@ test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'host': 'ubuntu@huawei#OpenStack_Pool',
'host': 'ubuntu001@backend001#OpenStack_Pool',
'provider_location': '11',
}

@@ -80,12 +81,23 @@ FakeConnector = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3',
'host': 'ubuntuc',
}

smarttier_opts = {'smarttier': 'true',
'smartpartition': False,
'smartcache': False,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'policy': '3',
'readcachepolicy': '1',
'writecachepolicy': None,
}

# A fake response of success response storage
FAKE_COMMON_SUCCESS_RESPONSE = """
{
"error": {
"code": 0
}
},
"data":{}
}
"""

@@ -154,7 +166,8 @@ FAKE_LUN_DELETE_SUCCESS_RESPONSE = """
"NAME": "5mFHcBv4RkCcD+JyrWc0SA",
"RUNNINGSTATUS": "2",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "27"
"RUNNINGSTATUS": "27",
"LUNLIST": ""
}
}
"""
@@ -312,7 +325,7 @@ FAKE_GET_ETH_INFO_RESPONSE = """
"MACADDRESS": "00:22:a1:0a:79:57",
"ETHNEGOTIATE": "-1",
"ERRORPACKETS": "0",
"IPV4ADDR": "198.100.10.1",
"IPV4ADDR": "192.168.1.2",
"IPV6GATEWAY": "",
"IPV6MASK": "0",
"OVERFLOWEDPACKETS": "0",
@@ -342,7 +355,7 @@ FAKE_GET_ETH_INFO_RESPONSE = """
"MACADDRESS": "00:22:a1:0a:79:57",
"ETHNEGOTIATE": "-1",
"ERRORPACKETS": "0",
"IPV4ADDR": "198.100.10.2",
"IPV4ADDR": "192.168.1.1",
"IPV6GATEWAY": "",
"IPV6MASK": "0",
"OVERFLOWEDPACKETS": "0",
@@ -376,12 +389,12 @@ FAKE_GET_ETH_ASSOCIATE_RESPONSE = """
"code":0
},
"data":[{
"IPV4ADDR": "198.100.10.1",
"IPV4ADDR": "192.168.1.1",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "10"
},
{
"IPV4ADDR": "198.100.10.2",
"IPV4ADDR": "192.168.1.2",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "10"
}
@@ -448,7 +461,7 @@ FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE = """
"code": 0
},
"data": [{
"NAME":"OpenStack_HostGroup_1",
"NAME": "OpenStack_HostGroup_1",
"DESCRIPTION":"",
"ID":"0",
"TYPE":14
@@ -645,7 +658,7 @@ FAKE_PORT_GROUP_RESPONSE = """
},
"data":[{
"ID":11,
"NAME":"portgroup-test"
"NAME": "portgroup-test"
}]
}
"""
@@ -653,19 +666,19 @@ FAKE_PORT_GROUP_RESPONSE = """
FAKE_ISCSI_INITIATOR_RESPONSE = """
{
"error":{
"code":0
"code": 0
},
"data":[{
"CHAPNAME":"mm-user",
"HEALTHSTATUS":"1",
"ID":"iqn.1993-08.org.debian:01:9073aba6c6f",
"ISFREE":"true",
"MULTIPATHTYPE":"1",
"NAME":"",
"OPERATIONSYSTEM":"255",
"RUNNINGSTATUS":"28",
"TYPE":222,
"USECHAP":"true"
"CHAPNAME": "mm-user",
"HEALTHSTATUS": "1",
"ID": "iqn.1993-08.org.debian:01:9073aba6c6f",
"ISFREE": "true",
"MULTIPATHTYPE": "1",
"NAME": "",
"OPERATIONSYSTEM": "255",
"RUNNINGSTATUS": "28",
"TYPE": 222,
"USECHAP": "true"
}]
}
"""
@@ -710,6 +723,29 @@ FAKE_ERROR_LUN_INFO_RESPONSE = """
}
}
"""

FAKE_SYSTEM_VERSION_RESPONSE = """
{
"error":{
"code": 0
},
"data":{
"PRODUCTVERSION": "V100R001C10"
}
}
"""

FAKE_QOS_INFO_RESPONSE = """
{
"error":{
"code": 0
},
"data":{
"ID": "11"
}
}
"""

# mock login info map
MAP_COMMAND_TO_FAKE_RESPONSE = {}
MAP_COMMAND_TO_FAKE_RESPONSE['/xx/sessions'] = (
@@ -734,6 +770,9 @@ MAP_COMMAND_TO_FAKE_RESPONSE['lun/1/GET'] = (
MAP_COMMAND_TO_FAKE_RESPONSE['lun/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)

MAP_COMMAND_TO_FAKE_RESPONSE['lun/1/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)

MAP_COMMAND_TO_FAKE_RESPONSE['lun?range=[0-65535]/GET'] = (
FAKE_QUERY_ALL_LUN_RESPONSE)

@@ -770,6 +809,10 @@ MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=11/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)

MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=1/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)

MAP_COMMAND_TO_FAKE_RESPONSE['lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
@@ -814,6 +857,9 @@ MAP_COMMAND_TO_FAKE_RESPONSE['ioclass/11/DELETE'] = (
MAP_COMMAND_TO_FAKE_RESPONSE['ioclass/active/11/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)

MAP_COMMAND_TO_FAKE_RESPONSE['ioclass/'] = (
FAKE_QOS_INFO_RESPONSE)

# mock iscsi info map
MAP_COMMAND_TO_FAKE_RESPONSE['iscsi_tgt_port/GET'] = (
FAKE_GET_ISCSI_INFO_RESPONSE)
@@ -896,6 +942,10 @@ MAP_COMMAND_TO_FAKE_RESPONSE['host_link?INITIATOR_TYPE=223'
MAP_COMMAND_TO_FAKE_RESPONSE['portgroup?range=[0-8191]&TYPE=257/GET'] = (
FAKE_PORT_GROUP_RESPONSE)

# mock system info map
MAP_COMMAND_TO_FAKE_RESPONSE['system/'] = (
FAKE_SYSTEM_VERSION_RESPONSE)


def Fake_sleep(time):
pass
@@ -941,6 +991,18 @@ class Fake18000Client(rest_client.RestClient):
def _check_snapshot_exist(self, snapshot_id):
return True

def get_partition_id_by_name(self, name):
return "11"

def add_lun_to_partition(self, lunid, partition_id):
pass

def get_cache_id_by_name(self, name):
return "11"

def add_lun_to_cache(self, lunid, cache_id):
pass

def call(self, url=False, data=None, method=None):
url = url.replace('http://100.115.10.69:8082/deviceManager/rest', '')
command = url.replace('/210235G7J20000000000/', '')
@@ -1002,11 +1064,11 @@ class Huawei18000ISCSIDriverTestCase(test.TestCase):
self.driver.do_setup()
self.portgroup = 'portgroup-test'
self.iscsi_iqns = ['iqn.2006-08.com.huawei:oceanstor:21000022a:'
':20500:198.100.10.1',
':20503:192.168.1.1',
'iqn.2006-08.com.huawei:oceanstor:21000022a:'
':20503:198.100.10.2']
self.target_ips = ['198.100.10.1',
'198.100.10.2']
':20500:192.168.1.2']
self.target_ips = ['192.168.1.1',
'192.168.1.2']
self.portgroup_id = 11

def test_login_success(self):
@@ -1128,8 +1190,8 @@ class Huawei18000ISCSIDriverTestCase(test.TestCase):
def test_get_tgtip(self):
self.driver.restclient.login()
portg_id = self.driver.restclient.find_tgt_port_group(self.portgroup)
result = self.driver.restclient._get_tgt_ip_from_portgroup(portg_id)
self.assertEqual(self.target_ips, result)
target_ip = self.driver.restclient._get_tgt_ip_from_portgroup(portg_id)
self.assertEqual(self.target_ips, target_ip)

def test_get_iscsi_params(self):
self.driver.restclient.login()
@@ -1226,6 +1288,33 @@ class Huawei18000ISCSIDriverTestCase(test.TestCase):
pool_info = self.driver.restclient.find_pool_info(pool_name, pools)
self.assertEqual(test_info, pool_info)

def test_get_smartx_specs_opts(self):
self.driver.restclient.login()
smartx_opts = smartx.SmartX().get_smartx_specs_opts(smarttier_opts)
self.assertEqual('3', smartx_opts['policy'])

@mock.patch.object(huawei_utils, 'get_volume_qos',
return_value={'MAXIOPS': '100',
'IOType': '2'})
def test_create_smartqos(self, mock_qos_value):
self.driver.restclient.login()
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])

@mock.patch.object(huawei_utils, 'get_volume_params',
return_value={'smarttier': 'true',
'smartcache': 'true',
'smartpartition': 'true',
'thin_provisioning_support': 'true',
'thick_provisioning_support': False,
'policy': '2',
'cachename': 'cache-test',
'partitionname': 'partition-test'})
def test_creat_smartx(self, mock_volume_types):
self.driver.restclient.login()
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])

def create_fake_conf_file(self):
"""Create a fake Config file.

@@ -1297,7 +1386,7 @@ class Huawei18000ISCSIDriverTestCase(test.TestCase):
iscsi.appendChild(defaulttargetip)
initiator = doc.createElement('Initiator')
initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
initiator.setAttribute('TargetIP', '192.168.100.2')
initiator.setAttribute('TargetIP', '192.168.1.2')
initiator.setAttribute('CHAPinfo', 'mm-user;mm-user@storage')
initiator.setAttribute('ALUA', '1')
initiator.setAttribute('TargetPortGroup', 'portgroup-test')
@@ -1534,7 +1623,7 @@ class Huawei18000FCDriverTestCase(test.TestCase):
iscsi.appendChild(defaulttargetip)
initiator = doc.createElement('Initiator')
initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
initiator.setAttribute('TargetIP', '192.168.100.2')
initiator.setAttribute('TargetIP', '192.168.1.2')
iscsi.appendChild(initiator)

prefetch = doc.createElement('Prefetch')

@@ -15,16 +15,18 @@

STATUS_HEALTH = '1'
STATUS_RUNNING = '10'
BLOCK_STORAGE_POOL_TYPE = '1'
FILE_SYSTEM_POOL_TYPE = '2'
STATUS_VOLUME_READY = '27'
STATUS_LUNCOPY_READY = '40'
STATUS_QOS_ACTIVE = '2'

BLOCK_STORAGE_POOL_TYPE = '1'
FILE_SYSTEM_POOL_TYPE = '2'

HOSTGROUP_PREFIX = 'OpenStack_HostGroup_'
LUNGROUP_PREFIX = 'OpenStack_LunGroup_'
MAPPING_VIEW_PREFIX = 'OpenStack_Mapping_View_'
QOS_NAME_PREFIX = 'OpenStack_'

ARRAY_VERSION = 'V300R003C00'
CAPACITY_UNIT = 1024.0 / 1024.0 / 2
DEFAULT_WAIT_TIMEOUT = 3600 * 24 * 30
DEFAULT_WAIT_INTERVAL = 5
@@ -32,7 +34,7 @@ ERROR_CONNECT_TO_SERVER = -403
ERROR_UNAUTHORIZED_TO_SERVER = -401
SOCKET_TIME_OUT = 720

MAX_HOSTNAME_LENTH = 31
MAX_HOSTNAME_LENGTH = 31

OS_TYPE = {'Linux': '0',
'Windows': '1',

@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.

import six
import uuid

from oslo_config import cfg
@@ -27,6 +28,7 @@ from cinder.volume import driver
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import rest_client
from cinder.volume.drivers.huawei import smartx
from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils

@@ -70,6 +72,10 @@ class HuaweiBaseDriver(driver.VolumeDriver):
@utils.synchronized('huawei', external=True)
def create_volume(self, volume):
"""Create a volume."""
opts = huawei_utils.get_volume_params(volume)
smartx_opts = smartx.SmartX().get_smartx_specs_opts(opts)
params = huawei_utils.get_lun_params(self.xml_file_path,
smartx_opts)
pool_name = volume_utils.extract_host(volume['host'],
level='pool')
pools = self.restclient.find_all_pools()
@@ -88,7 +94,6 @@ class HuaweiBaseDriver(driver.VolumeDriver):
{'volume': volume_name,
'size': volume_size})

params = huawei_utils.get_lun_conf_params(self.xml_file_path)
params['pool_id'] = pool_info['ID']
params['volume_size'] = volume_size
params['volume_description'] = volume_description
@@ -99,6 +104,14 @@ class HuaweiBaseDriver(driver.VolumeDriver):
# Create LUN on the array.
lun_info = self.restclient.create_volume(lun_param)
lun_id = lun_info['ID']
qos = huawei_utils.get_volume_qos(volume)
if qos:
smart_qos = smartx.SmartQos(self.restclient)
smart_qos.create_qos(qos, lun_id)
smartpartition = smartx.SmartPartition(self.restclient)
smartpartition.add(opts, lun_id)
smartcache = smartx.SmartCache(self.restclient)
smartcache.add(opts, lun_id)

return {'provider_location': lun_info['ID'],
'ID': lun_id,
@@ -119,6 +132,10 @@ class HuaweiBaseDriver(driver.VolumeDriver):
{'name': name, 'lun_id': lun_id},)
if lun_id:
if self.restclient.check_lun_exist(lun_id):
qos_id = self.restclient.get_qosid_by_lunid(lun_id)
if qos_id:
self.remove_qos_lun(lun_id, qos_id)

self.restclient.delete_lun(lun_id)
else:
LOG.warning(_LW("Can't find lun %s on the array."), lun_id)
@@ -126,6 +143,17 @@ class HuaweiBaseDriver(driver.VolumeDriver):

return True

def remove_qos_lun(self, lun_id, qos_id):
lun_list = self.restclient.get_lun_list_in_qos(qos_id)
lun_count = len(lun_list)
if lun_count <= 1:
qos = smartx.SmartQos(self.restclient)
qos.delete_qos(qos_id)
else:
self.restclient.remove_lun_from_qos(lun_id,
lun_list,
qos_id)

def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot.

@@ -238,7 +266,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
def create_snapshot(self, snapshot):
snapshot_info = self.restclient.create_snapshot(snapshot)
snapshot_id = snapshot_info['ID']
self.restclient.active_snapshot(snapshot_id)
self.restclient.activate_snapshot(snapshot_id)

return {'provider_location': snapshot_info['ID'],
'lun_info': snapshot_info}
@@ -283,9 +311,9 @@ class HuaweiBaseDriver(driver.VolumeDriver):

host_name_before_hash = None
host_name = connector['host']
if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENTH):
if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENGTH):
host_name_before_hash = host_name
host_name = str(hash(host_name))
host_name = six.text_type(hash(host_name))

# Create hostgroup if not exist.
host_id = self.restclient.add_host_with_check(host_name,
@@ -356,9 +384,9 @@ class HuaweiBaseDriver(driver.VolumeDriver):
# Create hostgroup if not exist.
host_name = connector['host']
host_name_before_hash = None
if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENTH):
if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENGTH):
host_name_before_hash = host_name
host_name = str(hash(host_name))
host_name = six.text_type(hash(host_name))
host_id = self.restclient.add_host_with_check(host_name,
host_name_before_hash)

@@ -604,6 +632,7 @@ class Huawei18000ISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver):
CHAP support
Multiple pools support
ISCSI multipath support
SmartX support
"""

VERSION = "1.1.1"
@@ -640,6 +669,7 @@ class Huawei18000FCDriver(HuaweiBaseDriver, driver.FibreChannelDriver):
1.1.0 - Provide Huawei OceanStor 18000 storage volume driver
1.1.1 - Code refactor
Multiple pools support
SmartX support
"""

VERSION = "1.1.1"

@@ -38,8 +38,8 @@ opts_capability = {
'smarttier': False,
'smartcache': False,
'smartpartition': False,
'thin_provisioning': False,
'thick_provisioning': False,
'thin_provisioning_support': False,
'thick_provisioning_support': False,
}


@@ -384,7 +384,7 @@ def wait_for_condition(xml_file_path, func, interval, timeout=None):
try:
res = func()
except Exception as ex:
raise exception.VolumeBackendAPIException(ex)
raise exception.VolumeBackendAPIException(data=ex)
if res:
raise loopingcall.LoopingCallDone()

@@ -170,7 +170,7 @@ class RestClient(object):
self._assert_data_in_result(result, msg)
return result

def find_pool_info(self, pool_name, result):
def find_pool_info(self, pool_name=None, result=None):
pool_info = {}
if not pool_name:
return pool_info
@@ -201,11 +201,11 @@ class RestClient(object):

return self._get_id_from_result(result, name, 'NAME')

def active_snapshot(self, snapshot_id):
activeurl = self.url + "/snapshot/activate"
def activate_snapshot(self, snapshot_id):
activate_url = self.url + "/snapshot/activate"
data = json.dumps({"SNAPSHOTLIST": [snapshot_id]})
result = self.call(activeurl, data)
self._assert_rest_result(result, _('Active snapshot error.'))
result = self.call(activate_url, data)
self._assert_rest_result(result, _('Activate snapshot error.'))

def create_snapshot(self, snapshot):
snapshot_name = huawei_utils.encode_name(snapshot['id'])
@@ -660,7 +660,7 @@ class RestClient(object):

if 'data' in result:
for item in result['data']:
if item["ID"] == ininame:
if item['ID'] == ininame:
return True
return False

@@ -988,9 +988,9 @@ class RestClient(object):

return iscsi_port_info

def _get_tgt_iqn(self, iscsiip):
def _get_tgt_iqn(self, iscsi_ip):
"""Get target iSCSI iqn."""
ip_info = self._get_iscsi_port_info(iscsiip)
ip_info = self._get_iscsi_port_info(iscsi_ip)
iqn_prefix = self._get_iscsi_tgt_port()
if not ip_info:
err_msg = (_(
@@ -1016,7 +1016,7 @@ class RestClient(object):
if iqn_suffix[i] != '0':
iqn_suffix = iqn_suffix[i:]
break
iqn = iqn_prefix + ':' + iqn_suffix + ':' + iscsiip
iqn = iqn_prefix + ':' + iqn_suffix + ':' + iscsi_ip
LOG.info(_LI('_get_tgt_iqn: iSCSI target iqn is: %s.'), iqn)
return iqn
else:
@@ -1043,9 +1043,9 @@ class RestClient(object):
root = huawei_utils.parse_xml_file(self.xml_file_path)
pool_names = root.findtext('Storage/StoragePool')
if not pool_names:
msg = (_(
msg = _(
'Invalid resource pool name. '
'Please check the config file.'))
'Please check the config file.')
LOG.error(msg)
raise exception.InvalidInput(msg)
data = {}
@@ -1054,13 +1054,22 @@ class RestClient(object):
for pool_name in pool_names.split(";"):
pool_name = pool_name.strip(' \t\n\r')
capacity = self._get_capacity(pool_name, result)
pool = {'pool_name': pool_name,
'total_capacity_gb': capacity['total_capacity'],
'free_capacity_gb': capacity['free_capacity'],
'reserved_percentage': 0,
'QoS_support': True,
}

pool = {}
pool.update(dict(
pool_name=pool_name,
total_capacity_gb=capacity['total_capacity'],
free_capacity_gb=capacity['free_capacity'],
reserved_percentage=self.configuration.safe_get(
'reserved_percentage'),
QoS_support=True,
max_over_subscription_ratio=self.configuration.safe_get(
'max_over_subscription_ratio'),
thin_provisioning_support=True,
thick_provisioning_support=True,
smarttier=True,
smartcache=True,
smartpartition=True,
))
data['pools'].append(pool)
return data

@@ -1083,11 +1092,11 @@ class RestClient(object):

return qos_info

def _update_qos_policy_lunlist(self, lunlist, policy_id):
def _update_qos_policy_lunlist(self, lun_list, policy_id):
url = self.url + "/ioclass/" + policy_id
data = json.dumps({"TYPE": "230",
"ID": policy_id,
"LUNLIST": lunlist})
"LUNLIST": lun_list})
result = self.call(url, data, "PUT")
self._assert_rest_result(result, _('Update QoS policy error.'))

@@ -1212,18 +1221,19 @@ class RestClient(object):
result = self.call(url, data, 'DELETE')
self._assert_rest_result(result, _('Delete QoS policy error.'))

def active_deactive_qos(self, qos_id, enablestatus):
"""Active or deactive QoS.
def activate_deactivate_qos(self, qos_id, enablestatus):
"""Activate or deactivate QoS.

enablestatus: true (active)
enbalestatus: false (deactive)
enablestatus: true (activate)
enbalestatus: false (deactivate)
"""
url = self.url + "/ioclass/active/" + qos_id
data = json.dumps({"TYPE": 230,
"ID": qos_id,
"ENABLESTATUS": enablestatus})
result = self.call(url, data, "PUT")
self._assert_rest_result(result, _('Active or deactive QoS error.'))
self._assert_rest_result(
result, _('Activate or deactivate QoS error.'))

def get_qos_info(self, qos_id):
"""Get QoS information."""
@@ -1235,6 +1245,31 @@ class RestClient(object):

return result['data']

def get_lun_list_in_qos(self, qos_id):
"""Get the lun list in QoS."""
qos_info = self.get_qos_info(qos_id)
lun_list = []
lun_string = qos_info['LUNLIST'][1:-1]

for lun in lun_string.split(","):
str = lun[1:-1]
lun_list.append(str)

return lun_list

def remove_lun_from_qos(self, lun_id, lun_list, qos_id):
"""Remove lun from QoS."""
lun_list = [i for i in lun_list if i != lun_id]
url = self.url + "/ioclass/" + qos_id
data = json.dumps({"LUNLIST": lun_list,
"TYPE": 230,
"ID": qos_id})
result = self.call(url, data, "PUT")

msg = _('Remove lun from Qos error.')
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)

def change_lun_priority(self, lun_id):
"""Change lun priority to high."""
url = self.url + "/lun/" + lun_id
@@ -1290,6 +1325,107 @@ class RestClient(object):

return result['data']

def get_partition_id_by_name(self, name):
url = self.url + "/cachepartition"
result = self.call(url, None, "GET")
self._assert_rest_result(result, _('Get partition by name error.'))

if 'data' in result:
for item in result['data']:
LOG.debug('get_partition_id_by_name item %(item)s.',
{'item': item})
if name == item['NAME']:
return item['ID']

return

def get_partition_info_by_id(self, partition_id):

url = self.url + '/cachepartition/' + partition_id
data = json.dumps({"TYPE": '268',
"ID": partition_id})

result = self.call(url, data, "GET")
self._assert_rest_result(result,
_('Get partition by partition id error.'))

return result['data']

def add_lun_to_partition(self, lun_id, partition_id):
url = self.url + "/lun/associate/cachepartition"
data = json.dumps({"ID": partition_id,
"ASSOCIATEOBJTYPE": 11,
"ASSOCIATEOBJID": lun_id, })
result = self.call(url, data, "POST")
self._assert_rest_result(result, _('Add lun to partition error.'))

def get_cache_id_by_name(self, name):
url = self.url + "/SMARTCACHEPARTITION"
result = self.call(url, None, "GET")
self._assert_rest_result(result, _('Get cache by name error.'))

if 'data' in result:
for item in result['data']:
if name == item['NAME']:
return item['ID']
return

def find_available_qos(self, qos):
""""Find available QoS on the array."""
qos_id = None
lun_list = []
url = self.url + "/ioclass?range=[0-100]"
result = self.call(url, None, "GET")
self._assert_rest_result(result, _('Get QoS information error.'))

if 'data' in result:
for item in result['data']:
qos_flag = 0
for key in qos:
if key not in item:
break
elif qos[key] != item[key]:
break
qos_flag = qos_flag + 1
if qos_flag == len(qos):
qos_id = item['ID']
lun_list = item['LUNLIST']
break

return (qos_id, lun_list)

def add_lun_to_qos(self, qos_id, lun_id, lun_list):
url = self.url + "/ioclass/" + qos_id
new_lun_list = []
lun_string = lun_list[1:-1]
for lun in lun_string.split(","):
str = lun[1:-1]
new_lun_list.append(str)
new_lun_list.append(lun_id)
data = json.dumps({"LUNLIST": new_lun_list,
"TYPE": 230,
"ID": qos_id})
result = self.call(url, data, "PUT")
msg = _('Associate lun to Qos error.')
self._assert_rest_result(result, msg)
self._assert_data_in_result(result, msg)

def add_lun_to_cache(self, lun_id, cache_id):
url = self.url + "/SMARTCACHEPARTITION/CREATE_ASSOCIATE"
data = json.dumps({"ID": cache_id,
"ASSOCIATEOBJTYPE": 11,
"ASSOCIATEOBJID": lun_id,
"TYPE": 273})
result = self.call(url, data, "PUT")

self._assert_rest_result(result, _('Add lun to cache error.'))

def find_array_version(self):
url = self.url + "/system/"
result = self.call(url, None)
self._assert_rest_result(result, _('Find array version error.'))
return result['data']['PRODUCTVERSION']

def remove_host(self, host_id):
url = self.url + "/host/%s" % host_id
result = self.call(url, None, "DELETE")

cinder/volume/drivers/huawei/smartx.py (new file, 161 lines)
@@ -0,0 +1,161 @@
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from oslo_utils import excutils

from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import huawei_utils

LOG = logging.getLogger(__name__)


class SmartQos(object):
    def __init__(self, client):
        self.client = client

    def create_qos(self, qos, lun_id):
        policy_id = None
        try:
            # Check QoS priority.
            if huawei_utils.check_qos_high_priority(qos):
                self.client.change_lun_priority(lun_id)
            # Create QoS policy and activate it.
            version = self.client.find_array_version()
            if version >= constants.ARRAY_VERSION:
                (qos_id, lun_list) = self.client.find_available_qos(qos)
                if qos_id:
                    self.client.add_lun_to_qos(qos_id, lun_id, lun_list)
                else:
                    policy_id = self.client.create_qos_policy(qos, lun_id)
                    self.client.activate_deactivate_qos(policy_id, True)
            else:
                policy_id = self.client.create_qos_policy(qos, lun_id)
                self.client.activate_deactivate_qos(policy_id, True)
        except exception.VolumeBackendAPIException:
            with excutils.save_and_reraise_exception():
                if policy_id is not None:
                    self.client.delete_qos_policy(policy_id)

    def delete_qos(self, qos_id):
        qos_info = self.client.get_qos_info(qos_id)
        qos_status = qos_info['RUNNINGSTATUS']
        # 2: Active status.
        if qos_status == constants.STATUS_QOS_ACTIVE:
            self.client.activate_deactivate_qos(qos_id, False)
        self.client.delete_qos_policy(qos_id)


class SmartPartition(object):
    def __init__(self, client):
        self.client = client

    def add(self, opts, lun_id):
        if opts['smartpartition'] != 'true':
            return
        if not opts['partitionname']:
            raise exception.InvalidInput(
                reason=_('Partition name is None, please set '
                         'smartpartition:partitionname in key.'))

        partition_id = self.client.get_partition_id_by_name(
            opts['partitionname'])
        if not partition_id:
            raise exception.InvalidInput(
                reason=(_('Can not find partition id by name %(name)s.')
                        % {'name': opts['partitionname']}))

        self.client.add_lun_to_partition(lun_id, partition_id)


class SmartCache(object):
    def __init__(self, client):
        self.client = client

    def add(self, opts, lun_id):
        if opts['smartcache'] != 'true':
            return
        if not opts['cachename']:
            raise exception.InvalidInput(
                reason=_('Cache name is None, please set '
                         'smartcache:cachename in key.'))

        cache_id = self.client.get_cache_id_by_name(opts['cachename'])
        if not cache_id:
            raise exception.InvalidInput(
                reason=(_('Can not find cache id by cache name %(name)s.')
                        % {'name': opts['cachename']}))

        self.client.add_lun_to_cache(lun_id, cache_id)


class SmartX(object):
    def get_smartx_specs_opts(self, opts):
        # Check that smarttier is 0/1/2/3
        opts = self.get_smarttier_opts(opts)
        opts = self.get_smartthin_opts(opts)
        opts = self.get_smartcache_opts(opts)
        opts = self.get_smartpartition_opts(opts)
        return opts

    def get_smarttier_opts(self, opts):
        if opts['smarttier'] == 'true':
            if not opts['policy']:
                opts['policy'] = '1'
            elif opts['policy'] not in ['0', '1', '2', '3']:
                raise exception.InvalidInput(
                    reason=(_('Illegal value specified for smarttier: '
                              'set to either 0, 1, 2, or 3.')))
        else:
            opts['policy'] = '0'

        return opts

    def get_smartthin_opts(self, opts):
        if opts['thin_provisioning_support'] == 'true':
            if opts['thick_provisioning_support'] == 'true':
                raise exception.InvalidInput(
                    reason=(_('Illegal value specified for thin: '
                              'Can not set thin and thick at the same time.')))
            else:
                opts['LUNType'] = 1
        if opts['thick_provisioning_support'] == 'true':
            opts['LUNType'] = 0

        return opts

    def get_smartcache_opts(self, opts):
        if opts['smartcache'] == 'true':
            if not opts['cachename']:
                raise exception.InvalidInput(
                    reason=_('Cache name is None, please set '
                             'smartcache:cachename in key.'))
        else:
            opts['cachename'] = None

        return opts

    def get_smartpartition_opts(self, opts):
        if opts['smartpartition'] == 'true':
            if not opts['partitionname']:
                raise exception.InvalidInput(
                    reason=_('Partition name is None, please set '
                             'smartpartition:partitionname in key.'))
        else:
            opts['partitionname'] = None

        return opts