Hitachi: add GAD volume support

This patch adds Global-Active Device ("GAD") (*) volume support to the
Hitachi VSP driver. It introduces new configuration properties: setting
hbsd:topology to "active_active_mirror_volume" specifies a GAD volume,
and the hitachi_mirror_xxx parameters specify the secondary storage used
for GAD volumes.

(*) GAD is a Hitachi storage feature that uses volume replication to
provide a highly available environment for hosts across storage systems
and sites.

Implements: blueprint hitachi-gad-support
Change-Id: I4543cd036897b4db8b04011b808dd5af34439153
parent e6637b94fa
commit a92aa06e46
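For illustration only, the sketch below shows how a deployment might enable GAD volumes once this patch is applied: a backend section that points at the primary array and at the secondary (mirror) array, plus a volume type whose hbsd:topology extra spec requests an active-active mirror volume. Only the option names (hitachi_mirror_*, hitachi_quorum_disk_id) and the hbsd:topology value come from this change; the backend name, storage IDs, pools, ports, addresses, and credentials are placeholders, not values taken from the patch.

    # cinder.conf -- hypothetical backend section (values are placeholders)
    [hbsd-gad]
    volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
    volume_backend_name = hbsd-gad
    # primary storage
    hitachi_storage_id = 900000012345
    hitachi_pool = pool0
    hitachi_target_ports = CL1-A
    # secondary (mirror) storage used for GAD pairs
    hitachi_mirror_storage_id = 900000067890
    hitachi_mirror_pool = pool0
    hitachi_mirror_target_ports = CL2-A
    hitachi_mirror_rest_api_ip = 192.0.2.100
    hitachi_mirror_rest_user = maintenance
    hitachi_mirror_rest_password = secret
    hitachi_quorum_disk_id = 0

    # volume type that requests a GAD (active-active mirror) volume
    $ openstack volume type create hbsd-gad-type
    $ openstack volume type set hbsd-gad-type \
        --property volume_backend_name=hbsd-gad \
        --property hbsd:topology=active_active_mirror_volume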
@@ -106,6 +106,8 @@ from cinder.volume.drivers.fusionstorage import dsware as \
     cinder_volume_drivers_fusionstorage_dsware
 from cinder.volume.drivers.hitachi import hbsd_common as \
     cinder_volume_drivers_hitachi_hbsdcommon
+from cinder.volume.drivers.hitachi import hbsd_replication as \
+    cinder_volume_drivers_hitachi_hbsdreplication
 from cinder.volume.drivers.hitachi import hbsd_rest as \
     cinder_volume_drivers_hitachi_hbsdrest
 from cinder.volume.drivers.hitachi import hbsd_rest_fc as \
@@ -291,6 +293,17 @@ def list_opts():
                cinder_volume_drivers_datera_dateraiscsi.d_opts,
                cinder_volume_drivers_fungible_driver.fungible_opts,
                cinder_volume_drivers_fusionstorage_dsware.volume_opts,
+               cinder_volume_drivers_hitachi_hbsdreplication._REP_OPTS,
+               cinder_volume_drivers_hitachi_hbsdreplication.
+               COMMON_MIRROR_OPTS,
+               cinder_volume_drivers_hitachi_hbsdreplication.
+               ISCSI_MIRROR_OPTS,
+               cinder_volume_drivers_hitachi_hbsdreplication.
+               REST_MIRROR_OPTS,
+               cinder_volume_drivers_hitachi_hbsdreplication.
+               REST_MIRROR_API_OPTS,
+               cinder_volume_drivers_hitachi_hbsdreplication.
+               REST_MIRROR_SSL_OPTS,
                cinder_volume_drivers_infortrend_raidcmd_cli_commoncli.
                infortrend_opts,
                cinder_volume_drivers_inspur_as13000_as13000driver.
@@ -356,8 +369,10 @@ def list_opts():
                FJ_ETERNUS_DX_OPT_opts,
                cinder_volume_drivers_hitachi_hbsdcommon.COMMON_VOLUME_OPTS,
                cinder_volume_drivers_hitachi_hbsdcommon.COMMON_PORT_OPTS,
+               cinder_volume_drivers_hitachi_hbsdcommon.COMMON_PAIR_OPTS,
                cinder_volume_drivers_hitachi_hbsdcommon.COMMON_NAME_OPTS,
                cinder_volume_drivers_hitachi_hbsdrest.REST_VOLUME_OPTS,
+               cinder_volume_drivers_hitachi_hbsdrest.REST_PAIR_OPTS,
                cinder_volume_drivers_hitachi_hbsdrestfc.FC_VOLUME_OPTS,
                cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts,
                cinder_volume_drivers_hpe_nimble.nimble_opts,
[File diff suppressed because it is too large]
@@ -36,6 +36,7 @@ from cinder.volume import configuration as conf
 from cinder.volume import driver
 from cinder.volume.drivers.hitachi import hbsd_common
 from cinder.volume.drivers.hitachi import hbsd_fc
+from cinder.volume.drivers.hitachi import hbsd_replication
 from cinder.volume.drivers.hitachi import hbsd_rest
 from cinder.volume.drivers.hitachi import hbsd_rest_api
 from cinder.volume.drivers.hitachi import hbsd_rest_fc
@@ -182,6 +183,16 @@ GET_HOST_WWNS_RESULT = {
     ],
 }
 
+GET_HOST_GROUPS_RESULT_TEST = {
+    "data": [
+        {
+            "hostGroupNumber": 0,
+            "portId": CONFIG_MAP['port_id'],
+            "hostGroupName": CONFIG_MAP['host_grp_name'],
+        },
+    ],
+}
+
 COMPLETED_SUCCEEDED_RESULT = {
     "status": "Completed",
     "state": "Succeeded",
@@ -307,6 +318,16 @@ GET_HOST_GROUPS_RESULT = {
     ],
 }
 
+GET_HOST_GROUPS_RESULT_PAIR = {
+    "data": [
+        {
+            "hostGroupNumber": 1,
+            "portId": CONFIG_MAP['port_id'],
+            "hostGroupName": "HBSD-pair00",
+        },
+    ],
+}
+
 GET_LDEVS_RESULT = {
     "data": [
         {
@@ -481,7 +502,6 @@ class HBSDRESTFCDriverTest(test.TestCase):
         self.configuration.hitachi_rest_disable_io_wait = True
         self.configuration.hitachi_rest_tcp_keepalive = True
         self.configuration.hitachi_discard_zero_page = True
-        self.configuration.hitachi_rest_number = "0"
         self.configuration.hitachi_lun_timeout = hbsd_rest._LUN_TIMEOUT
         self.configuration.hitachi_lun_retry_interval = (
             hbsd_rest._LUN_RETRY_INTERVAL)
@@ -529,6 +549,21 @@ class HBSDRESTFCDriverTest(test.TestCase):
         self.configuration.chap_username = CONFIG_MAP['auth_user']
         self.configuration.chap_password = CONFIG_MAP['auth_password']
 
+        self.configuration.hitachi_replication_number = 0
+        self.configuration.hitachi_pair_target_number = 0
+        self.configuration.hitachi_rest_pair_target_ports = []
+        self.configuration.hitachi_quorum_disk_id = ''
+        self.configuration.hitachi_mirror_copy_speed = ''
+        self.configuration.hitachi_mirror_storage_id = ''
+        self.configuration.hitachi_mirror_pool = ''
+        self.configuration.hitachi_mirror_ldev_range = ''
+        self.configuration.hitachi_mirror_target_ports = ''
+        self.configuration.hitachi_mirror_rest_user = ''
+        self.configuration.hitachi_mirror_rest_password = ''
+        self.configuration.hitachi_mirror_rest_api_ip = ''
+        self.configuration.hitachi_set_mirror_reserve_attribute = ''
+        self.configuration.hitachi_path_group_id = ''
+
         self.configuration.safe_get = self._fake_safe_get
 
         CONF = cfg.CONF
@@ -553,7 +588,8 @@ class HBSDRESTFCDriverTest(test.TestCase):
            configuration=self.configuration)
        request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
                               FakeResponse(200, GET_PORTS_RESULT),
-                              FakeResponse(200, GET_HOST_WWNS_RESULT)]
+                              FakeResponse(200, GET_HOST_WWNS_RESULT),
+                              FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
        self.driver.do_setup(None)
        self.driver.check_for_setup_error()
        self.driver.local_path(None)
@@ -580,13 +616,14 @@ class HBSDRESTFCDriverTest(test.TestCase):
        self._setup_config()
        request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
                               FakeResponse(200, GET_PORTS_RESULT),
-                              FakeResponse(200, GET_HOST_WWNS_RESULT)]
+                              FakeResponse(200, GET_HOST_WWNS_RESULT),
+                              FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
        drv.do_setup(None)
        self.assertEqual(
            {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
            drv.common.storage_info['wwns'])
        self.assertEqual(1, brick_get_connector_properties.call_count)
-       self.assertEqual(3, request.call_count)
+       self.assertEqual(4, request.call_count)
        # stop the Loopingcall within the do_setup treatment
        self.driver.common.client.keep_session_loop.stop()
        self.driver.common.client.keep_session_loop.wait()
@@ -607,13 +644,14 @@ class HBSDRESTFCDriverTest(test.TestCase):
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
-                              FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+                              FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+                              FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
        drv.do_setup(None)
        self.assertEqual(
            {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
            drv.common.storage_info['wwns'])
        self.assertEqual(1, brick_get_connector_properties.call_count)
-       self.assertEqual(8, request.call_count)
+       self.assertEqual(9, request.call_count)
        # stop the Loopingcall within the do_setup treatment
        self.driver.common.client.keep_session_loop.stop()
        self.driver.common.client.keep_session_loop.wait()
@@ -635,13 +673,14 @@ class HBSDRESTFCDriverTest(test.TestCase):
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
-                              FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+                              FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+                              FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
        drv.do_setup(None)
        self.assertEqual(
            {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
            drv.common.storage_info['wwns'])
        self.assertEqual(1, brick_get_connector_properties.call_count)
-       self.assertEqual(8, request.call_count)
+       self.assertEqual(9, request.call_count)
        # stop the Loopingcall within the do_setup treatment
        self.driver.common.client.keep_session_loop.stop()
        self.driver.common.client.keep_session_loop.wait()
@@ -687,13 +726,14 @@ class HBSDRESTFCDriverTest(test.TestCase):
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
-                              FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+                              FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+                              FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
        drv.do_setup(None)
        self.assertEqual(
            {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
            drv.common.storage_info['wwns'])
        self.assertEqual(1, brick_get_connector_properties.call_count)
-       self.assertEqual(9, request.call_count)
+       self.assertEqual(10, request.call_count)
        # stop the Loopingcall within the do_setup treatment
        self.driver.common.client.keep_session_loop.stop()
        self.driver.common.client.keep_session_loop.wait()
@@ -712,13 +752,14 @@ class HBSDRESTFCDriverTest(test.TestCase):
        request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
                               FakeResponse(200, GET_POOLS_RESULT),
                               FakeResponse(200, GET_PORTS_RESULT),
-                              FakeResponse(200, GET_HOST_WWNS_RESULT)]
+                              FakeResponse(200, GET_HOST_WWNS_RESULT),
+                              FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
        drv.do_setup(None)
        self.assertEqual(
            {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
            drv.common.storage_info['wwns'])
        self.assertEqual(1, brick_get_connector_properties.call_count)
-       self.assertEqual(4, request.call_count)
+       self.assertEqual(5, request.call_count)
        self.configuration.hitachi_pool = tmp_pool
        # stop the Loopingcall within the do_setup treatment
        self.driver.common.client.keep_session_loop.stop()
@@ -835,9 +876,13 @@ class HBSDRESTFCDriverTest(test.TestCase):
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
+                              FakeResponse(200, GET_LDEV_RESULT),
+                              FakeResponse(200, GET_LDEV_RESULT),
+                              FakeResponse(200, GET_LDEV_RESULT),
+                              FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        self.driver.delete_snapshot(TEST_SNAPSHOT[0])
-       self.assertEqual(10, request.call_count)
+       self.assertEqual(14, request.call_count)
 
    @mock.patch.object(requests.Session, "request")
    def test_delete_snapshot_no_pair(self, request):
@@ -946,6 +991,32 @@ class HBSDRESTFCDriverTest(test.TestCase):
        self.assertEqual(5, request.call_count)
        self.assertEqual(1, add_fc_zone.call_count)
 
+    @mock.patch.object(fczm_utils, "add_fc_zone")
+    @mock.patch.object(requests.Session, "request")
+    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+    def test_create_target_to_storage_return(
+            self, get_volume_type_extra_specs, request, add_fc_zone):
+        self.configuration.hitachi_zoning_request = True
+        self.driver.common._lookup_service = FakeLookupService()
+        extra_specs = {"hbsd:target_ports": "CL1-A"}
+        get_volume_type_extra_specs.return_value = extra_specs
+        request.side_effect = [
+            FakeResponse(200, NOTFOUND_RESULT),
+            FakeResponse(200, NOTFOUND_RESULT),
+            FakeResponse(200, GET_HOST_GROUPS_RESULT),
+            FakeResponse(200, NOTFOUND_RESULT),
+            FakeResponse(400, GET_HOST_GROUPS_RESULT),
+            FakeResponse(200, GET_HOST_GROUPS_RESULT_TEST),
+            FakeResponse(200, GET_HOST_GROUPS_RESULT_TEST),
+        ]
+        self.assertRaises(exception.VolumeDriverException,
+                          self.driver.initialize_connection,
+                          TEST_VOLUME[1],
+                          DEFAULT_CONNECTOR)
+        self.assertEqual(1, get_volume_type_extra_specs.call_count)
+        self.assertEqual(10, request.call_count)
+        self.assertEqual(0, add_fc_zone.call_count)
+
    @mock.patch.object(fczm_utils, "remove_fc_zone")
    @mock.patch.object(requests.Session, "request")
    def test_terminate_connection(self, request, remove_fc_zone):
@@ -1319,10 +1390,14 @@ class HBSDRESTFCDriverTest(test.TestCase):
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
+                              FakeResponse(200, GET_LDEV_RESULT),
+                              FakeResponse(200, GET_LDEV_RESULT),
+                              FakeResponse(200, GET_LDEV_RESULT),
+                              FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        ret = self.driver.delete_group_snapshot(
            self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]])
-       self.assertEqual(10, request.call_count)
+       self.assertEqual(14, request.call_count)
        actual = (
            {'status': TEST_GROUP_SNAP[0]['status']},
            [{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}]
@@ -1335,7 +1410,15 @@ class HBSDRESTFCDriverTest(test.TestCase):
        ret = self.driver.get_driver_options()
        actual = (hbsd_common.COMMON_VOLUME_OPTS +
                  hbsd_common.COMMON_PORT_OPTS +
+                 hbsd_common.COMMON_PAIR_OPTS +
                  hbsd_common.COMMON_NAME_OPTS +
                  hbsd_rest.REST_VOLUME_OPTS +
-                 hbsd_rest_fc.FC_VOLUME_OPTS)
+                 hbsd_rest.REST_PAIR_OPTS +
+                 hbsd_rest_fc.FC_VOLUME_OPTS +
+                 hbsd_replication._REP_OPTS +
+                 hbsd_replication.COMMON_MIRROR_OPTS +
+                 hbsd_replication.ISCSI_MIRROR_OPTS +
+                 hbsd_replication.REST_MIRROR_OPTS +
+                 hbsd_replication.REST_MIRROR_API_OPTS +
+                 hbsd_replication.REST_MIRROR_SSL_OPTS)
        self.assertEqual(actual, ret)
@@ -33,6 +33,7 @@ from cinder.volume import configuration as conf
 from cinder.volume import driver
 from cinder.volume.drivers.hitachi import hbsd_common
 from cinder.volume.drivers.hitachi import hbsd_iscsi
+from cinder.volume.drivers.hitachi import hbsd_replication
 from cinder.volume.drivers.hitachi import hbsd_rest
 from cinder.volume.drivers.hitachi import hbsd_rest_api
 from cinder.volume import volume_types
@@ -252,6 +253,16 @@ GET_SNAPSHOTS_RESULT_PAIR = {
     ],
 }
 
+GET_HOST_GROUPS_RESULT_PAIR = {
+    "data": [
+        {
+            "hostGroupNumber": 1,
+            "portId": CONFIG_MAP['port_id'],
+            "hostGroupName": "HBSD-pair00",
+        },
+    ],
+}
+
 GET_LDEVS_RESULT = {
     "data": [
         {
@@ -354,7 +365,6 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
         self.configuration.hitachi_rest_disable_io_wait = True
         self.configuration.hitachi_rest_tcp_keepalive = True
         self.configuration.hitachi_discard_zero_page = True
-        self.configuration.hitachi_rest_number = "0"
         self.configuration.hitachi_lun_timeout = hbsd_rest._LUN_TIMEOUT
         self.configuration.hitachi_lun_retry_interval = (
             hbsd_rest._LUN_RETRY_INTERVAL)
@@ -400,6 +410,21 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
         self.configuration.ssh_min_pool_conn = '1'
         self.configuration.ssh_max_pool_conn = '5'
 
+        self.configuration.hitachi_replication_number = 0
+        self.configuration.hitachi_pair_target_number = 0
+        self.configuration.hitachi_rest_pair_target_ports = []
+        self.configuration.hitachi_quorum_disk_id = ''
+        self.configuration.hitachi_mirror_copy_speed = ''
+        self.configuration.hitachi_mirror_storage_id = ''
+        self.configuration.hitachi_mirror_pool = ''
+        self.configuration.hitachi_mirror_ldev_range = ''
+        self.configuration.hitachi_mirror_target_ports = ''
+        self.configuration.hitachi_mirror_rest_user = ''
+        self.configuration.hitachi_mirror_rest_password = ''
+        self.configuration.hitachi_mirror_rest_api_ip = ''
+        self.configuration.hitachi_set_mirror_reserve_attribute = ''
+        self.configuration.hitachi_path_group_id = ''
+
         self.configuration.safe_get = self._fake_safe_get
 
         CONF = cfg.CONF
@@ -426,7 +451,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
                               FakeResponse(200, GET_PORTS_RESULT),
                               FakeResponse(200, GET_PORT_RESULT),
                               FakeResponse(200, GET_HOST_ISCSIS_RESULT),
-                              FakeResponse(200, GET_HOST_GROUP_RESULT)]
+                              FakeResponse(200, GET_HOST_GROUP_RESULT),
+                              FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
        self.driver.do_setup(None)
        self.driver.check_for_setup_error()
        self.driver.local_path(None)
@@ -455,7 +481,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
                               FakeResponse(200, GET_PORTS_RESULT),
                               FakeResponse(200, GET_PORT_RESULT),
                               FakeResponse(200, GET_HOST_ISCSIS_RESULT),
-                              FakeResponse(200, GET_HOST_GROUP_RESULT)]
+                              FakeResponse(200, GET_HOST_GROUP_RESULT),
+                              FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
        drv.do_setup(None)
        self.assertEqual(
            {CONFIG_MAP['port_id']:
@@ -464,7 +491,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
                'port': CONFIG_MAP['tcpPort']}},
            drv.common.storage_info['portals'])
        self.assertEqual(1, brick_get_connector_properties.call_count)
-       self.assertEqual(5, request.call_count)
+       self.assertEqual(6, request.call_count)
        # stop the Loopingcall within the do_setup treatment
        self.driver.common.client.keep_session_loop.stop()
        self.driver.common.client.keep_session_loop.wait()
@@ -485,7 +512,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
-                              FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+                              FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+                              FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
        drv.do_setup(None)
        self.assertEqual(
            {CONFIG_MAP['port_id']:
@@ -494,7 +522,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
                'port': CONFIG_MAP['tcpPort']}},
            drv.common.storage_info['portals'])
        self.assertEqual(1, brick_get_connector_properties.call_count)
-       self.assertEqual(8, request.call_count)
+       self.assertEqual(9, request.call_count)
        # stop the Loopingcall within the do_setup treatment
        self.driver.common.client.keep_session_loop.stop()
        self.driver.common.client.keep_session_loop.wait()
@@ -515,7 +543,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
-                              FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+                              FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+                              FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
        drv.do_setup(None)
        self.assertEqual(
            {CONFIG_MAP['port_id']:
@@ -524,7 +553,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
                'port': CONFIG_MAP['tcpPort']}},
            drv.common.storage_info['portals'])
        self.assertEqual(1, brick_get_connector_properties.call_count)
-       self.assertEqual(8, request.call_count)
+       self.assertEqual(9, request.call_count)
        # stop the Loopingcall within the do_setup treatment
        self.driver.common.client.keep_session_loop.stop()
        self.driver.common.client.keep_session_loop.wait()
@@ -1025,10 +1054,14 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
+                              FakeResponse(200, GET_LDEV_RESULT),
+                              FakeResponse(200, GET_LDEV_RESULT),
+                              FakeResponse(200, GET_LDEV_RESULT),
+                              FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        ret = self.driver.delete_group_snapshot(
            self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]])
-       self.assertEqual(10, request.call_count)
+       self.assertEqual(14, request.call_count)
        actual = (
            {'status': TEST_GROUP_SNAP[0]['status']},
            [{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}]
@@ -1040,6 +1073,14 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
        _get_oslo_driver_opts.return_value = []
        ret = self.driver.get_driver_options()
        actual = (hbsd_common.COMMON_VOLUME_OPTS +
+                 hbsd_common.COMMON_PAIR_OPTS +
                  hbsd_common.COMMON_NAME_OPTS +
-                 hbsd_rest.REST_VOLUME_OPTS)
+                 hbsd_rest.REST_VOLUME_OPTS +
+                 hbsd_rest.REST_PAIR_OPTS +
+                 hbsd_replication._REP_OPTS +
+                 hbsd_replication.COMMON_MIRROR_OPTS +
+                 hbsd_replication.ISCSI_MIRROR_OPTS +
+                 hbsd_replication.REST_MIRROR_OPTS +
+                 hbsd_replication.REST_MIRROR_API_OPTS +
+                 hbsd_replication.REST_MIRROR_SSL_OPTS)
        self.assertEqual(actual, ret)
@ -14,6 +14,7 @@
|
|||||||
#
|
#
|
||||||
"""Common module for Hitachi HBSD Driver."""
|
"""Common module for Hitachi HBSD Driver."""
|
||||||
|
|
||||||
|
import json
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
@ -45,8 +46,8 @@ _GROUP_NAME_VAR_LEN = {GROUP_NAME_VAR_WWN: _GROUP_NAME_VAR_WWN_LEN,
|
|||||||
GROUP_NAME_VAR_IP: _GROUP_NAME_VAR_IP_LEN,
|
GROUP_NAME_VAR_IP: _GROUP_NAME_VAR_IP_LEN,
|
||||||
GROUP_NAME_VAR_HOST: _GROUP_NAME_VAR_HOST_LEN}
|
GROUP_NAME_VAR_HOST: _GROUP_NAME_VAR_HOST_LEN}
|
||||||
|
|
||||||
_STR_VOLUME = 'volume'
|
STR_VOLUME = 'volume'
|
||||||
_STR_SNAPSHOT = 'snapshot'
|
STR_SNAPSHOT = 'snapshot'
|
||||||
|
|
||||||
_INHERITED_VOLUME_OPTS = [
|
_INHERITED_VOLUME_OPTS = [
|
||||||
'volume_backend_name',
|
'volume_backend_name',
|
||||||
@ -131,6 +132,13 @@ COMMON_PORT_OPTS = [
|
|||||||
'WWNs are registered to ports in a round-robin fashion.'),
|
'WWNs are registered to ports in a round-robin fashion.'),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
COMMON_PAIR_OPTS = [
|
||||||
|
cfg.IntOpt(
|
||||||
|
'hitachi_pair_target_number',
|
||||||
|
default=0, min=0, max=99,
|
||||||
|
help='Pair target name of the host group or iSCSI target'),
|
||||||
|
]
|
||||||
|
|
||||||
COMMON_NAME_OPTS = [
|
COMMON_NAME_OPTS = [
|
||||||
cfg.StrOpt(
|
cfg.StrOpt(
|
||||||
'hitachi_group_name_format',
|
'hitachi_group_name_format',
|
||||||
@ -162,13 +170,14 @@ _GROUP_NAME_FORMAT = {
|
|||||||
CONF = cfg.CONF
|
CONF = cfg.CONF
|
||||||
CONF.register_opts(COMMON_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP)
|
CONF.register_opts(COMMON_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP)
|
||||||
CONF.register_opts(COMMON_PORT_OPTS, group=configuration.SHARED_CONF_GROUP)
|
CONF.register_opts(COMMON_PORT_OPTS, group=configuration.SHARED_CONF_GROUP)
|
||||||
|
CONF.register_opts(COMMON_PAIR_OPTS, group=configuration.SHARED_CONF_GROUP)
|
||||||
CONF.register_opts(COMMON_NAME_OPTS, group=configuration.SHARED_CONF_GROUP)
|
CONF.register_opts(COMMON_NAME_OPTS, group=configuration.SHARED_CONF_GROUP)
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
MSG = utils.HBSDMsg
|
MSG = utils.HBSDMsg
|
||||||
|
|
||||||
|
|
||||||
def _str2int(num):
|
def str2int(num):
|
||||||
"""Convert a string into an integer."""
|
"""Convert a string into an integer."""
|
||||||
if not num:
|
if not num:
|
||||||
return None
|
return None
|
||||||
@ -202,9 +211,11 @@ class HBSDCommon():
|
|||||||
'ldev_range': [],
|
'ldev_range': [],
|
||||||
'controller_ports': [],
|
'controller_ports': [],
|
||||||
'compute_ports': [],
|
'compute_ports': [],
|
||||||
|
'pair_ports': [],
|
||||||
'wwns': {},
|
'wwns': {},
|
||||||
'portals': {},
|
'portals': {},
|
||||||
}
|
}
|
||||||
|
self.storage_id = None
|
||||||
self.group_name_format = _GROUP_NAME_FORMAT[driverinfo['proto']]
|
self.group_name_format = _GROUP_NAME_FORMAT[driverinfo['proto']]
|
||||||
self.format_info = {
|
self.format_info = {
|
||||||
'group_name_format': self.group_name_format[
|
'group_name_format': self.group_name_format[
|
||||||
@ -255,7 +266,7 @@ class HBSDCommon():
|
|||||||
ldev = self.create_ldev(volume['size'], pool_id, ldev_range)
|
ldev = self.create_ldev(volume['size'], pool_id, ldev_range)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
utils.output_log(MSG.CREATE_LDEV_FAILED)
|
self.output_log(MSG.CREATE_LDEV_FAILED)
|
||||||
self.modify_ldev_name(ldev, volume['id'].replace("-", ""))
|
self.modify_ldev_name(ldev, volume['id'].replace("-", ""))
|
||||||
return {
|
return {
|
||||||
'provider_location': str(ldev),
|
'provider_location': str(ldev),
|
||||||
@ -276,33 +287,33 @@ class HBSDCommon():
|
|||||||
|
|
||||||
def copy_on_storage(
|
def copy_on_storage(
|
||||||
self, pvol, size, pool_id, snap_pool_id, ldev_range,
|
self, pvol, size, pool_id, snap_pool_id, ldev_range,
|
||||||
is_snapshot=False, sync=False):
|
is_snapshot=False, sync=False, is_rep=False):
|
||||||
"""Create a copy of the specified LDEV on the storage."""
|
"""Create a copy of the specified LDEV on the storage."""
|
||||||
ldev_info = self.get_ldev_info(['status', 'attributes'], pvol)
|
ldev_info = self.get_ldev_info(['status', 'attributes'], pvol)
|
||||||
if ldev_info['status'] != 'NML':
|
if ldev_info['status'] != 'NML':
|
||||||
msg = utils.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol)
|
msg = self.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol)
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
svol = self.create_ldev(size, pool_id, ldev_range)
|
svol = self.create_ldev(size, pool_id, ldev_range)
|
||||||
try:
|
try:
|
||||||
self.create_pair_on_storage(
|
self.create_pair_on_storage(
|
||||||
pvol, svol, snap_pool_id, is_snapshot=is_snapshot)
|
pvol, svol, snap_pool_id, is_snapshot=is_snapshot)
|
||||||
if sync:
|
if sync or is_rep:
|
||||||
self.wait_copy_completion(pvol, svol)
|
self.wait_copy_completion(pvol, svol)
|
||||||
except Exception:
|
except Exception:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
try:
|
try:
|
||||||
self.delete_ldev(svol)
|
self.delete_ldev(svol)
|
||||||
except exception.VolumeDriverException:
|
except exception.VolumeDriverException:
|
||||||
utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=svol)
|
self.output_log(MSG.DELETE_LDEV_FAILED, ldev=svol)
|
||||||
return svol
|
return svol
|
||||||
|
|
||||||
def create_volume_from_src(self, volume, src, src_type):
|
def create_volume_from_src(self, volume, src, src_type, is_rep=False):
|
||||||
"""Create a volume from a volume or snapshot and return its properties.
|
"""Create a volume from a volume or snapshot and return its properties.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
ldev = utils.get_ldev(src)
|
ldev = self.get_ldev(src)
|
||||||
if ldev is None:
|
if ldev is None:
|
||||||
msg = utils.output_log(
|
msg = self.output_log(
|
||||||
MSG.INVALID_LDEV_FOR_VOLUME_COPY, type=src_type, id=src['id'])
|
MSG.INVALID_LDEV_FOR_VOLUME_COPY, type=src_type, id=src['id'])
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
|
|
||||||
@ -311,8 +322,10 @@ class HBSDCommon():
|
|||||||
snap_pool_id = self.storage_info['snap_pool_id']
|
snap_pool_id = self.storage_info['snap_pool_id']
|
||||||
ldev_range = self.storage_info['ldev_range']
|
ldev_range = self.storage_info['ldev_range']
|
||||||
new_ldev = self.copy_on_storage(
|
new_ldev = self.copy_on_storage(
|
||||||
ldev, size, pool_id, snap_pool_id, ldev_range)
|
ldev, size, pool_id, snap_pool_id, ldev_range, is_rep=is_rep)
|
||||||
self.modify_ldev_name(new_ldev, volume['id'].replace("-", ""))
|
self.modify_ldev_name(new_ldev, volume['id'].replace("-", ""))
|
||||||
|
if is_rep:
|
||||||
|
self.delete_pair(new_ldev)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'provider_location': str(new_ldev),
|
'provider_location': str(new_ldev),
|
||||||
@ -320,11 +333,11 @@ class HBSDCommon():
|
|||||||
|
|
||||||
def create_cloned_volume(self, volume, src_vref):
|
def create_cloned_volume(self, volume, src_vref):
|
||||||
"""Create a clone of the specified volume and return its properties."""
|
"""Create a clone of the specified volume and return its properties."""
|
||||||
return self.create_volume_from_src(volume, src_vref, _STR_VOLUME)
|
return self.create_volume_from_src(volume, src_vref, STR_VOLUME)
|
||||||
|
|
||||||
def create_volume_from_snapshot(self, volume, snapshot):
|
def create_volume_from_snapshot(self, volume, snapshot):
|
||||||
"""Create a volume from a snapshot and return its properties."""
|
"""Create a volume from a snapshot and return its properties."""
|
||||||
return self.create_volume_from_src(volume, snapshot, _STR_SNAPSHOT)
|
return self.create_volume_from_src(volume, snapshot, STR_SNAPSHOT)
|
||||||
|
|
||||||
def delete_pair_based_on_svol(self, pvol, svol_info):
|
def delete_pair_based_on_svol(self, pvol, svol_info):
|
||||||
"""Disconnect all volume pairs to which the specified S-VOL belongs."""
|
"""Disconnect all volume pairs to which the specified S-VOL belongs."""
|
||||||
@ -340,7 +353,7 @@ class HBSDCommon():
|
|||||||
if not pair_info:
|
if not pair_info:
|
||||||
return
|
return
|
||||||
if pair_info['pvol'] == ldev:
|
if pair_info['pvol'] == ldev:
|
||||||
utils.output_log(
|
self.output_log(
|
||||||
MSG.UNABLE_TO_DELETE_PAIR, pvol=pair_info['pvol'])
|
MSG.UNABLE_TO_DELETE_PAIR, pvol=pair_info['pvol'])
|
||||||
self.raise_busy()
|
self.raise_busy()
|
||||||
else:
|
else:
|
||||||
@ -375,9 +388,9 @@ class HBSDCommon():
|
|||||||
|
|
||||||
def delete_volume(self, volume):
|
def delete_volume(self, volume):
|
||||||
"""Delete the specified volume."""
|
"""Delete the specified volume."""
|
||||||
ldev = utils.get_ldev(volume)
|
ldev = self.get_ldev(volume)
|
||||||
if ldev is None:
|
if ldev is None:
|
||||||
utils.output_log(
|
self.output_log(
|
||||||
MSG.INVALID_LDEV_FOR_DELETION,
|
MSG.INVALID_LDEV_FOR_DELETION,
|
||||||
method='delete_volume', id=volume['id'])
|
method='delete_volume', id=volume['id'])
|
||||||
return
|
return
|
||||||
@ -392,9 +405,9 @@ class HBSDCommon():
|
|||||||
def create_snapshot(self, snapshot):
|
def create_snapshot(self, snapshot):
|
||||||
"""Create a snapshot from a volume and return its properties."""
|
"""Create a snapshot from a volume and return its properties."""
|
||||||
src_vref = snapshot.volume
|
src_vref = snapshot.volume
|
||||||
ldev = utils.get_ldev(src_vref)
|
ldev = self.get_ldev(src_vref)
|
||||||
if ldev is None:
|
if ldev is None:
|
||||||
msg = utils.output_log(
|
msg = self.output_log(
|
||||||
MSG.INVALID_LDEV_FOR_VOLUME_COPY,
|
MSG.INVALID_LDEV_FOR_VOLUME_COPY,
|
||||||
type='volume', id=src_vref['id'])
|
type='volume', id=src_vref['id'])
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
@ -410,9 +423,9 @@ class HBSDCommon():
|
|||||||
|
|
||||||
def delete_snapshot(self, snapshot):
|
def delete_snapshot(self, snapshot):
|
||||||
"""Delete the specified snapshot."""
|
"""Delete the specified snapshot."""
|
||||||
ldev = utils.get_ldev(snapshot)
|
ldev = self.get_ldev(snapshot)
|
||||||
if ldev is None:
|
if ldev is None:
|
||||||
utils.output_log(
|
self.output_log(
|
||||||
MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot',
|
MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot',
|
||||||
id=snapshot['id'])
|
id=snapshot['id'])
|
||||||
return
|
return
|
||||||
@ -453,7 +466,7 @@ class HBSDCommon():
|
|||||||
single_pool.update(dict(
|
single_pool.update(dict(
|
||||||
provisioned_capacity_gb=0,
|
provisioned_capacity_gb=0,
|
||||||
backend_state='down'))
|
backend_state='down'))
|
||||||
utils.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool=pool_name)
|
self.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool=pool_name)
|
||||||
return single_pool
|
return single_pool
|
||||||
total_capacity, free_capacity, provisioned_capacity = cap_data
|
total_capacity, free_capacity, provisioned_capacity = cap_data
|
||||||
single_pool.update(dict(
|
single_pool.update(dict(
|
||||||
@ -506,14 +519,14 @@ class HBSDCommon():
|
|||||||
|
|
||||||
def extend_volume(self, volume, new_size):
|
def extend_volume(self, volume, new_size):
|
||||||
"""Extend the specified volume to the specified size."""
|
"""Extend the specified volume to the specified size."""
|
||||||
ldev = utils.get_ldev(volume)
|
ldev = self.get_ldev(volume)
|
||||||
if ldev is None:
|
if ldev is None:
|
||||||
msg = utils.output_log(MSG.INVALID_LDEV_FOR_EXTENSION,
|
msg = self.output_log(MSG.INVALID_LDEV_FOR_EXTENSION,
|
||||||
volume_id=volume['id'])
|
volume_id=volume['id'])
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
if self.check_pair_svol(ldev):
|
if self.check_pair_svol(ldev):
|
||||||
msg = utils.output_log(MSG.INVALID_VOLUME_TYPE_FOR_EXTEND,
|
msg = self.output_log(MSG.INVALID_VOLUME_TYPE_FOR_EXTEND,
|
||||||
volume_id=volume['id'])
|
volume_id=volume['id'])
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
self.delete_pair(ldev)
|
self.delete_pair(ldev)
|
||||||
self.extend_ldev(ldev, volume['size'], new_size)
|
self.extend_ldev(ldev, volume['size'], new_size)
|
||||||
@ -532,7 +545,7 @@ class HBSDCommon():
|
|||||||
ldev = self.get_ldev_by_name(
|
ldev = self.get_ldev_by_name(
|
||||||
existing_ref.get('source-name').replace('-', ''))
|
existing_ref.get('source-name').replace('-', ''))
|
||||||
elif 'source-id' in existing_ref:
|
elif 'source-id' in existing_ref:
|
||||||
ldev = _str2int(existing_ref.get('source-id'))
|
ldev = str2int(existing_ref.get('source-id'))
|
||||||
self.check_ldev_manageability(ldev, existing_ref)
|
self.check_ldev_manageability(ldev, existing_ref)
|
||||||
self.modify_ldev_name(ldev, volume['id'].replace("-", ""))
|
self.modify_ldev_name(ldev, volume['id'].replace("-", ""))
|
||||||
return {
|
return {
|
||||||
@ -543,29 +556,29 @@ class HBSDCommon():
|
|||||||
"""Return the size[GB] of the specified LDEV."""
|
"""Return the size[GB] of the specified LDEV."""
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
def manage_existing_get_size(self, existing_ref):
|
def manage_existing_get_size(self, volume, existing_ref):
|
||||||
"""Return the size[GB] of the specified volume."""
|
"""Return the size[GB] of the specified volume."""
|
||||||
ldev = None
|
ldev = None
|
||||||
if 'source-name' in existing_ref:
|
if 'source-name' in existing_ref:
|
||||||
ldev = self.get_ldev_by_name(
|
ldev = self.get_ldev_by_name(
|
||||||
existing_ref.get('source-name').replace("-", ""))
|
existing_ref.get('source-name').replace("-", ""))
|
||||||
elif 'source-id' in existing_ref:
|
elif 'source-id' in existing_ref:
|
||||||
ldev = _str2int(existing_ref.get('source-id'))
|
ldev = str2int(existing_ref.get('source-id'))
|
||||||
if ldev is None:
|
if ldev is None:
|
||||||
msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE)
|
msg = self.output_log(MSG.INVALID_LDEV_FOR_MANAGE)
|
||||||
raise exception.ManageExistingInvalidReference(
|
raise exception.ManageExistingInvalidReference(
|
||||||
existing_ref=existing_ref, reason=msg)
|
existing_ref=existing_ref, reason=msg)
|
||||||
return self.get_ldev_size_in_gigabyte(ldev, existing_ref)
|
return self.get_ldev_size_in_gigabyte(ldev, existing_ref)
|
||||||
|
|
||||||
def unmanage(self, volume):
|
def unmanage(self, volume):
|
||||||
"""Prepare the volume for removing it from Cinder management."""
|
"""Prepare the volume for removing it from Cinder management."""
|
||||||
ldev = utils.get_ldev(volume)
|
ldev = self.get_ldev(volume)
|
||||||
if ldev is None:
|
if ldev is None:
|
||||||
utils.output_log(MSG.INVALID_LDEV_FOR_DELETION, method='unmanage',
|
self.output_log(MSG.INVALID_LDEV_FOR_DELETION, method='unmanage',
|
||||||
id=volume['id'])
|
id=volume['id'])
|
||||||
return
|
return
|
||||||
if self.check_pair_svol(ldev):
|
if self.check_pair_svol(ldev):
|
||||||
utils.output_log(
|
self.output_log(
|
||||||
MSG.INVALID_LDEV_TYPE_FOR_UNMANAGE, volume_id=volume['id'],
|
MSG.INVALID_LDEV_TYPE_FOR_UNMANAGE, volume_id=volume['id'],
|
||||||
volume_type=utils.NORMAL_LDEV_TYPE)
|
volume_type=utils.NORMAL_LDEV_TYPE)
|
||||||
raise exception.VolumeIsBusy(volume_name=volume['name'])
|
raise exception.VolumeIsBusy(volume_name=volume['name'])
|
||||||
@ -579,10 +592,10 @@ class HBSDCommon():
|
|||||||
|
|
||||||
def _range2list(self, param):
|
def _range2list(self, param):
|
||||||
"""Analyze a 'xxx-xxx' string and return a list of two integers."""
|
"""Analyze a 'xxx-xxx' string and return a list of two integers."""
|
||||||
values = [_str2int(value) for value in
|
values = [str2int(value) for value in
|
||||||
self.conf.safe_get(param).split('-')]
|
self.conf.safe_get(param).split('-')]
|
||||||
if len(values) != 2 or None in values or values[0] > values[1]:
|
if len(values) != 2 or None in values or values[0] > values[1]:
|
||||||
msg = utils.output_log(MSG.INVALID_PARAMETER, param=param)
|
msg = self.output_log(MSG.INVALID_PARAMETER, param=param)
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
return values
|
return values
|
||||||
|
|
||||||
@ -594,31 +607,35 @@ class HBSDCommon():
|
|||||||
self.check_opts(self.conf, COMMON_PORT_OPTS)
|
self.check_opts(self.conf, COMMON_PORT_OPTS)
|
||||||
if (self.conf.hitachi_port_scheduler and
|
if (self.conf.hitachi_port_scheduler and
|
||||||
not self.conf.hitachi_group_create):
|
not self.conf.hitachi_group_create):
|
||||||
msg = utils.output_log(
|
msg = self.output_log(
|
||||||
MSG.INVALID_PARAMETER,
|
MSG.INVALID_PARAMETER,
|
||||||
param=self.driver_info['param_prefix'] + '_port_scheduler')
|
param=self.driver_info['param_prefix'] + '_port_scheduler')
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
if (self._lookup_service is None and
|
if (self._lookup_service is None and
|
||||||
self.conf.hitachi_port_scheduler):
|
self.conf.hitachi_port_scheduler):
|
||||||
msg = utils.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE)
|
msg = self.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE)
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
|
|
||||||
def check_param_iscsi(self):
|
def check_param_iscsi(self):
|
||||||
"""Check iSCSI-related parameter values and consistency among them."""
|
"""Check iSCSI-related parameter values and consistency among them."""
|
||||||
if self.conf.use_chap_auth:
|
if self.conf.use_chap_auth:
|
||||||
if not self.conf.chap_username:
|
if not self.conf.chap_username:
|
||||||
msg = utils.output_log(MSG.INVALID_PARAMETER,
|
msg = self.output_log(MSG.INVALID_PARAMETER,
|
||||||
param='chap_username')
|
param='chap_username')
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
if not self.conf.chap_password:
|
if not self.conf.chap_password:
|
||||||
msg = utils.output_log(MSG.INVALID_PARAMETER,
|
msg = self.output_log(MSG.INVALID_PARAMETER,
|
||||||
param='chap_password')
|
param='chap_password')
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
|
|
||||||
def check_param(self):
|
def check_param(self):
|
||||||
"""Check parameter values and consistency among them."""
|
"""Check parameter values and consistency among them."""
|
||||||
utils.check_opt_value(self.conf, _INHERITED_VOLUME_OPTS)
|
self.check_opt_value(self.conf, _INHERITED_VOLUME_OPTS)
|
||||||
self.check_opts(self.conf, COMMON_VOLUME_OPTS)
|
self.check_opts(self.conf, COMMON_VOLUME_OPTS)
|
||||||
|
if hasattr(
|
||||||
|
self.conf,
|
||||||
|
self.driver_info['param_prefix'] + '_pair_target_number'):
|
||||||
|
self.check_opts(self.conf, COMMON_PAIR_OPTS)
|
||||||
if hasattr(
|
if hasattr(
|
||||||
self.conf,
|
self.conf,
|
||||||
self.driver_info['param_prefix'] + '_group_name_format'):
|
self.driver_info['param_prefix'] + '_group_name_format'):
|
||||||
@ -628,7 +645,7 @@ class HBSDCommon():
|
|||||||
self.driver_info['param_prefix'] + '_ldev_range')
|
self.driver_info['param_prefix'] + '_ldev_range')
|
||||||
if (not self.conf.hitachi_target_ports and
|
if (not self.conf.hitachi_target_ports and
|
||||||
not self.conf.hitachi_compute_target_ports):
|
not self.conf.hitachi_compute_target_ports):
|
||||||
msg = utils.output_log(
|
msg = self.output_log(
|
||||||
MSG.INVALID_PARAMETER,
|
MSG.INVALID_PARAMETER,
|
||||||
param=self.driver_info['param_prefix'] + '_target_ports or ' +
|
param=self.driver_info['param_prefix'] + '_target_ports or ' +
|
||||||
self.driver_info['param_prefix'] + '_compute_target_ports')
|
self.driver_info['param_prefix'] + '_compute_target_ports')
|
||||||
@ -636,18 +653,18 @@ class HBSDCommon():
|
|||||||
self._check_param_group_name_format()
|
self._check_param_group_name_format()
|
||||||
if (self.conf.hitachi_group_delete and
|
if (self.conf.hitachi_group_delete and
|
||||||
not self.conf.hitachi_group_create):
|
not self.conf.hitachi_group_create):
|
||||||
msg = utils.output_log(
|
msg = self.output_log(
|
||||||
MSG.INVALID_PARAMETER,
|
MSG.INVALID_PARAMETER,
|
||||||
param=self.driver_info['param_prefix'] + '_group_delete or '
|
param=self.driver_info['param_prefix'] + '_group_delete or '
|
||||||
+ self.driver_info['param_prefix'] + '_group_create')
|
+ self.driver_info['param_prefix'] + '_group_create')
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
for opt in self._required_common_opts:
|
for opt in self._required_common_opts:
|
||||||
if not self.conf.safe_get(opt):
|
if not self.conf.safe_get(opt):
|
||||||
msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
|
msg = self.output_log(MSG.INVALID_PARAMETER, param=opt)
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
for pool in self.conf.hitachi_pool:
|
for pool in self.conf.hitachi_pool:
|
||||||
if len(pool) == 0:
|
if len(pool) == 0:
|
||||||
msg = utils.output_log(
|
msg = self.output_log(
|
||||||
MSG.INVALID_PARAMETER,
|
MSG.INVALID_PARAMETER,
|
||||||
param=self.driver_info['param_prefix'] + '_pool')
|
param=self.driver_info['param_prefix'] + '_pool')
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
@ -687,7 +704,7 @@ class HBSDCommon():
|
|||||||
'group_name_max_len']:
|
'group_name_max_len']:
|
||||||
error_flag = True
|
error_flag = True
|
||||||
if error_flag:
|
if error_flag:
|
||||||
msg = utils.output_log(
|
msg = self.output_log(
|
||||||
MSG.INVALID_PARAMETER,
|
MSG.INVALID_PARAMETER,
|
||||||
param=self.driver_info['param_prefix'] +
|
param=self.driver_info['param_prefix'] +
|
||||||
'_group_name_format')
|
'_group_name_format')
|
||||||
@ -719,8 +736,8 @@ class HBSDCommon():
|
|||||||
def connect_storage(self):
|
def connect_storage(self):
|
||||||
"""Prepare for using the storage."""
|
"""Prepare for using the storage."""
|
||||||
self.check_pool_id()
|
self.check_pool_id()
|
||||||
utils.output_log(MSG.SET_CONFIG_VALUE, object='DP Pool ID',
|
self.output_log(MSG.SET_CONFIG_VALUE, object='DP Pool ID',
|
||||||
value=self.storage_info['pool_id'])
|
value=self.storage_info['pool_id'])
|
||||||
self.storage_info['controller_ports'] = []
|
self.storage_info['controller_ports'] = []
|
||||||
self.storage_info['compute_ports'] = []
|
self.storage_info['compute_ports'] = []
|
||||||
|
|
||||||
@ -732,8 +749,8 @@ class HBSDCommon():
|
|||||||
"""Return the HBA ID stored in the connector."""
|
"""Return the HBA ID stored in the connector."""
|
||||||
if self.driver_info['hba_id'] in connector:
|
if self.driver_info['hba_id'] in connector:
|
||||||
return connector[self.driver_info['hba_id']]
|
return connector[self.driver_info['hba_id']]
|
||||||
msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
|
msg = self.output_log(MSG.RESOURCE_NOT_FOUND,
|
||||||
resource=self.driver_info['hba_id_type'])
|
resource=self.driver_info['hba_id_type'])
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
|
|
||||||
def set_device_map(self, targets, hba_ids, volume):
|
def set_device_map(self, targets, hba_ids, volume):
|
||||||
@ -759,7 +776,7 @@ class HBSDCommon():
|
|||||||
for target_port, target_gid in targets['list']:
|
for target_port, target_gid in targets['list']:
|
||||||
if target_port == port:
|
if target_port == port:
|
||||||
return target_gid
|
return target_gid
|
||||||
msg = utils.output_log(MSG.NO_CONNECTED_TARGET)
|
msg = self.output_log(MSG.NO_CONNECTED_TARGET)
|
||||||
self.raise_error(msg)
|
self.raise_error(msg)
|
||||||
|
|
||||||
def set_target_mode(self, port, gid):
|
def set_target_mode(self, port, gid):
|
||||||
@ -782,7 +799,7 @@ class HBSDCommon():
|
|||||||
if port not in targets['info'] or not targets['info'][port]:
|
if port not in targets['info'] or not targets['info'][port]:
|
||||||
target_name, gid = self.create_target_to_storage(
|
target_name, gid = self.create_target_to_storage(
|
||||||
port, connector, hba_ids)
|
port, connector, hba_ids)
|
||||||
utils.output_log(
|
self.output_log(
|
||||||
MSG.OBJECT_CREATED,
|
MSG.OBJECT_CREATED,
|
||||||
object='a target',
|
object='a target',
|
||||||
details='port: %(port)s, gid: %(gid)s, target_name: '
|
details='port: %(port)s, gid: %(gid)s, target_name: '
|
||||||
@ -821,14 +838,14 @@ class HBSDCommon():
|
|||||||
self.create_target(
|
self.create_target(
|
||||||
targets, port, connector, active_hba_ids)
|
targets, port, connector, active_hba_ids)
|
||||||
except exception.VolumeDriverException:
|
except exception.VolumeDriverException:
|
||||||
utils.output_log(
|
self.output_log(
|
||||||
self.driver_info['msg_id']['target'], port=port)
|
self.driver_info['msg_id']['target'], port=port)
|
||||||
|
|
||||||
# When other threads created a host group at same time, need to
|
# When other threads created a host group at same time, need to
|
||||||
# re-find targets.
|
# re-find targets.
|
||||||
if not targets['list']:
|
if not targets['list']:
|
||||||
self.find_targets_from_storage(
|
self.find_targets_from_storage(
|
||||||
targets, connector, targets['info'].keys())
|
targets, connector, list(targets['info'].keys()))
|
||||||
|
|
||||||
def get_port_index_to_be_used(self, ports, network_name):
|
def get_port_index_to_be_used(self, ports, network_name):
|
||||||
backend_name = self.conf.safe_get('volume_backend_name')
|
backend_name = self.conf.safe_get('volume_backend_name')
|
||||||
@@ -880,21 +897,22 @@ class HBSDCommon():
         """Check if available storage ports exist."""
         if (self.conf.hitachi_target_ports and
                 not self.storage_info['controller_ports']):
-            msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
+            msg = self.output_log(MSG.RESOURCE_NOT_FOUND,
                                    resource="Target ports")
             self.raise_error(msg)
         if (self.conf.hitachi_compute_target_ports and
                 not self.storage_info['compute_ports']):
-            msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
+            msg = self.output_log(MSG.RESOURCE_NOT_FOUND,
                                    resource="Compute target ports")
             self.raise_error(msg)
-        utils.output_log(MSG.SET_CONFIG_VALUE, object='target port list',
+        self.output_log(MSG.SET_CONFIG_VALUE, object='target port list',
                          value=self.storage_info['controller_ports'])
-        utils.output_log(MSG.SET_CONFIG_VALUE,
+        self.output_log(MSG.SET_CONFIG_VALUE,
                          object='compute target port list',
                          value=self.storage_info['compute_ports'])

-    def attach_ldev(self, volume, ldev, connector, is_snapshot, targets):
+    def attach_ldev(
+            self, volume, ldev, connector, is_snapshot, targets, lun=None):
         """Initialize connection between the server and the volume."""
         raise NotImplementedError()

@@ -952,7 +970,8 @@ class HBSDCommon():
     @coordination.synchronized(
         '{self.driver_info[driver_file_prefix]}-host-'
         '{self.conf.hitachi_storage_id}-{connector[host]}')
-    def initialize_connection(self, volume, connector, is_snapshot=False):
+    def initialize_connection(
+            self, volume, connector, is_snapshot=False, lun=None):
         """Initialize connection between the server and the volume."""
         targets = {
             'info': {},
@@ -961,14 +980,14 @@ class HBSDCommon():
             'iqns': {},
             'target_map': {},
         }
-        ldev = utils.get_ldev(volume)
+        ldev = self.get_ldev(volume)
         if ldev is None:
-            msg = utils.output_log(MSG.INVALID_LDEV_FOR_CONNECTION,
+            msg = self.output_log(MSG.INVALID_LDEV_FOR_CONNECTION,
                                    volume_id=volume['id'])
             self.raise_error(msg)

         target_lun = self.attach_ldev(
-            volume, ldev, connector, is_snapshot, targets)
+            volume, ldev, connector, is_snapshot, targets, lun)

         return {
             'driver_volume_type': self.driver_info['volume_type'],
@@ -996,10 +1015,10 @@ class HBSDCommon():

     def terminate_connection(self, volume, connector):
         """Terminate connection between the server and the volume."""
-        ldev = utils.get_ldev(volume)
+        ldev = self.get_ldev(volume)
         if ldev is None:
-            utils.output_log(MSG.INVALID_LDEV_FOR_UNMAPPING,
+            self.output_log(MSG.INVALID_LDEV_FOR_UNMAPPING,
                              volume_id=volume['id'])
             return
         # If a fake connector is generated by nova when the host
         # is down, then the connector will not have a host property,
@@ -1008,7 +1027,7 @@ class HBSDCommon():
         if 'host' not in connector:
             port_hostgroup_map = self.get_port_hostgroup_map(ldev)
             if not port_hostgroup_map:
-                utils.output_log(MSG.NO_LUN, ldev=ldev)
+                self.output_log(MSG.NO_LUN, ldev=ldev)
                 return
             self.set_terminate_target(connector, port_hostgroup_map)

@@ -1031,21 +1050,17 @@ class HBSDCommon():
                     'data': {'target_wwn': target_wwn}}
         return inner(self, volume, connector)

-    def get_volume_extra_specs(self, volume):
-        if volume is None:
-            return {}
-        type_id = volume.get('volume_type_id')
-        if type_id is None:
-            return {}
-
-        return volume_types.get_volume_type_extra_specs(type_id)
-
     def filter_target_ports(self, target_ports, volume, is_snapshot=False):
         specs = self.get_volume_extra_specs(volume) if volume else None
         if not specs:
             return target_ports
         if self.driver_info.get('driver_dir_name'):
-            tps_name = self.driver_info['driver_dir_name'] + ':target_ports'
+            if getattr(self, 'is_secondary', False):
+                tps_name = self.driver_info[
+                    'driver_dir_name'] + ':remote_target_ports'
+            else:
+                tps_name = self.driver_info[
+                    'driver_dir_name'] + ':target_ports'
         else:
             return target_ports

@@ -1059,7 +1074,7 @@ class HBSDCommon():
             volume = volume['volume']
         for port in tpsset:
             if port not in target_ports:
-                utils.output_log(
+                self.output_log(
                     MSG.INVALID_EXTRA_SPEC_KEY_PORT,
                     port=port, target_ports_param=tps_name,
                     volume_type=volume['volume_type']['name'])
@@ -1071,7 +1086,7 @@ class HBSDCommon():

     def unmanage_snapshot(self, snapshot):
         """Output error message and raise NotImplementedError."""
-        utils.output_log(
+        self.output_log(
             MSG.SNAPSHOT_UNMANAGE_FAILED, snapshot_id=snapshot['id'])
         raise NotImplementedError()

@@ -1093,8 +1108,8 @@ class HBSDCommon():

     def revert_to_snapshot(self, volume, snapshot):
         """Rollback the specified snapshot."""
-        pvol = utils.get_ldev(volume)
-        svol = utils.get_ldev(snapshot)
+        pvol = self.get_ldev(volume)
+        svol = self.get_ldev(snapshot)
         if (pvol is not None and
                 svol is not None and
                 self.has_snap_pair(pvol, svol)):
@@ -1121,20 +1136,65 @@ class HBSDCommon():
     def delete_group_snapshot(self, group_snapshot, snapshots):
         raise NotImplementedError()

+    def output_log(self, msg_enum, **kwargs):
+        if self.storage_id is not None:
+            return utils.output_log(
+                msg_enum, storage_id=self.storage_id, **kwargs)
+        else:
+            return utils.output_log(msg_enum, **kwargs)
+
+    def get_ldev(self, obj, both=False):
+        if not obj:
+            return None
+        provider_location = obj.get('provider_location')
+        if not provider_location:
+            return None
+        if provider_location.isdigit():
+            return int(provider_location)
+        if provider_location.startswith('{'):
+            loc = json.loads(provider_location)
+            if isinstance(loc, dict):
+                if getattr(self, 'is_primary', False) or (
+                        hasattr(self, 'primary_storage_id') and not both):
+                    return None if 'pldev' not in loc else int(loc['pldev'])
+                elif getattr(self, 'is_secondary', False):
+                    return None if 'sldev' not in loc else int(loc['sldev'])
+                if hasattr(self, 'primary_storage_id'):
+                    return {key: loc.get(key) for key in ['pldev', 'sldev']}
+        return None
+
+    def check_opt_value(self, conf, names):
+        """Check if the parameter names and values are valid."""
+        for name in names:
+            try:
+                getattr(conf, name)
+            except (cfg.NoSuchOptError, cfg.ConfigFileValueError):
+                with excutils.save_and_reraise_exception():
+                    self.output_log(MSG.INVALID_PARAMETER, param=name)
+
     def check_opts(self, conf, opts):
         """Check if the specified configuration is valid."""
         names = []
         for opt in opts:
             if opt.required and not conf.safe_get(opt.name):
-                msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt.name)
+                msg = self.output_log(MSG.INVALID_PARAMETER, param=opt.name)
                 self.raise_error(msg)
             names.append(opt.name)
-        utils.check_opt_value(conf, names)
+        self.check_opt_value(conf, names)
+
+    def get_volume_extra_specs(self, volume):
+        if volume is None:
+            return {}
+        type_id = volume.get('volume_type_id', None)
+        if type_id is None:
+            return {}
+
+        return volume_types.get_volume_type_extra_specs(type_id)

     def require_target_existed(self, targets):
         """Check if the target list includes one or more members."""
         if not targets['list']:
-            msg = utils.output_log(MSG.NO_CONNECTED_TARGET)
+            msg = self.output_log(MSG.NO_CONNECTED_TARGET)
             self.raise_error(msg)

     def raise_error(self, msg):
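For reference, the new `get_ldev` above accepts two `provider_location` encodings: the legacy form, which is just the LDEV ID as a decimal string, and a JSON object that carries the primary and secondary LDEVs of a replicated volume. A minimal parsing sketch, assuming only what the method above shows (the `pldev`/`sldev` keys); the LDEV numbers are invented for illustration:

```python
import json


def parse_provider_location(provider_location, side='primary'):
    # Legacy format: the LDEV ID as a bare decimal string, e.g. "1025".
    if provider_location.isdigit():
        return int(provider_location)
    # Replicated format: a JSON object such as '{"pldev": 1025, "sldev": 2049}'.
    if provider_location.startswith('{'):
        loc = json.loads(provider_location)
        key = 'pldev' if side == 'primary' else 'sldev'
        return int(loc[key]) if key in loc else None
    return None


print(parse_provider_location('1025'))                                          # 1025
print(parse_provider_location('{"pldev": 1025, "sldev": 2049}', 'secondary'))   # 2049
```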
@@ -21,6 +21,7 @@ from oslo_utils import excutils
 from cinder import interface
 from cinder.volume import driver
 from cinder.volume.drivers.hitachi import hbsd_common as common
+from cinder.volume.drivers.hitachi import hbsd_replication as replication
 from cinder.volume.drivers.hitachi import hbsd_rest as rest
 from cinder.volume.drivers.hitachi import hbsd_rest_fc as rest_fc
 from cinder.volume.drivers.hitachi import hbsd_utils as utils
@@ -51,6 +52,8 @@ _DRIVER_INFO = {
     'nvol_ldev_type': utils.NVOL_LDEV_TYPE,
     'target_iqn_suffix': utils.TARGET_IQN_SUFFIX,
     'pair_attr': utils.PAIR_ATTR,
+    'mirror_attr': utils.MIRROR_ATTR,
+    'driver_impl_class': rest_fc.HBSDRESTFC,
 }


@@ -72,8 +75,9 @@ class HBSDFCDriver(driver.FibreChannelDriver):
         2.2.2 - Add Target Port Assignment.
         2.2.3 - Add port scheduler.
         2.3.0 - Support multi pool.
-        2.3.1 - Update retype and support storage assisted migration.
-        2.3.2 - Add specifies format of the names HostGroups/iSCSI Targets.
+        2.3.1 - Add specifies format of the names HostGroups/iSCSI Targets.
+        2.3.2 - Add GAD volume support.
+        2.3.3 - Update retype and support storage assisted migration.

     """

@@ -82,6 +86,8 @@ class HBSDFCDriver(driver.FibreChannelDriver):
     # ThirdPartySystems wiki page
     CI_WIKI_NAME = utils.CI_WIKI_NAME

+    driver_info = dict(_DRIVER_INFO)
+
     def __init__(self, *args, **kwargs):
         """Initialize instance variables."""
         utils.output_log(MSG.DRIVER_INITIALIZATION_START,
@@ -90,14 +96,25 @@ class HBSDFCDriver(driver.FibreChannelDriver):
         super(HBSDFCDriver, self).__init__(*args, **kwargs)

         self.configuration.append_config_values(common.COMMON_VOLUME_OPTS)
+        self.configuration.append_config_values(common.COMMON_PAIR_OPTS)
         self.configuration.append_config_values(common.COMMON_PORT_OPTS)
         self.configuration.append_config_values(common.COMMON_NAME_OPTS)
         self.configuration.append_config_values(rest_fc.FC_VOLUME_OPTS)
+        self.configuration.append_config_values(
+            replication.COMMON_MIRROR_OPTS)
         os.environ['LANG'] = 'C'
-        self.common = self._init_common(self.configuration, kwargs.get('db'))
-
-    def _init_common(self, conf, db):
-        return rest_fc.HBSDRESTFC(conf, _DRIVER_INFO, db)
+        kwargs.setdefault('driver_info', _DRIVER_INFO)
+        self.driver_info = dict(kwargs['driver_info'])
+        self.driver_info['driver_class'] = self.__class__
+        if self.configuration.safe_get('hitachi_mirror_storage_id'):
+            self.common = replication.HBSDREPLICATION(
+                self.configuration, self.driver_info, kwargs.get('db'))
+        elif not hasattr(self, '_init_common'):
+            self.common = self.driver_info['driver_impl_class'](
+                self.configuration, self.driver_info, kwargs.get('db'))
+        else:
+            self.common = self._init_common(
+                self.configuration, kwargs.get('db'))

     @staticmethod
     def get_driver_options():
@@ -108,9 +125,17 @@ class HBSDFCDriver(driver.FibreChannelDriver):
                 'san_api_port', ]))
         return (common.COMMON_VOLUME_OPTS +
                 common.COMMON_PORT_OPTS +
+                common.COMMON_PAIR_OPTS +
                 common.COMMON_NAME_OPTS +
                 rest.REST_VOLUME_OPTS +
+                rest.REST_PAIR_OPTS +
                 rest_fc.FC_VOLUME_OPTS +
+                replication._REP_OPTS +
+                replication.COMMON_MIRROR_OPTS +
+                replication.ISCSI_MIRROR_OPTS +
+                replication.REST_MIRROR_OPTS +
+                replication.REST_MIRROR_API_OPTS +
+                replication.REST_MIRROR_SSL_OPTS +
                 additional_opts)

     def check_for_setup_error(self):
@@ -187,7 +212,7 @@ class HBSDFCDriver(driver.FibreChannelDriver):
     @volume_utils.trace
     def manage_existing_get_size(self, volume, existing_ref):
         """Return the size[GB] of the specified volume."""
-        return self.common.manage_existing_get_size(existing_ref)
+        return self.common.manage_existing_get_size(volume, existing_ref)

     @volume_utils.trace
     def unmanage(self, volume):
@@ -21,6 +21,7 @@ from oslo_utils import excutils
 from cinder import interface
 from cinder.volume import driver
 from cinder.volume.drivers.hitachi import hbsd_common as common
+from cinder.volume.drivers.hitachi import hbsd_replication as replication
 from cinder.volume.drivers.hitachi import hbsd_rest as rest
 from cinder.volume.drivers.hitachi import hbsd_rest_iscsi as rest_iscsi
 from cinder.volume.drivers.hitachi import hbsd_utils as utils
@@ -51,6 +52,8 @@ _DRIVER_INFO = {
     'nvol_ldev_type': utils.NVOL_LDEV_TYPE,
     'target_iqn_suffix': utils.TARGET_IQN_SUFFIX,
     'pair_attr': utils.PAIR_ATTR,
+    'mirror_attr': utils.MIRROR_ATTR,
+    'driver_impl_class': rest_iscsi.HBSDRESTISCSI,
 }


@@ -72,8 +75,9 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
         2.2.2 - Add Target Port Assignment.
         2.2.3 - Add port scheduler.
         2.3.0 - Support multi pool.
-        2.3.1 - Update retype and support storage assisted migration.
-        2.3.2 - Add specifies format of the names HostGroups/iSCSI Targets.
+        2.3.1 - Add specifies format of the names HostGroups/iSCSI Targets.
+        2.3.2 - Add GAD volume support.
+        2.3.3 - Update retype and support storage assisted migration.

     """

@@ -82,6 +86,8 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
     # ThirdPartySystems wiki page
     CI_WIKI_NAME = utils.CI_WIKI_NAME

+    driver_info = dict(_DRIVER_INFO)
+
     def __init__(self, *args, **kwargs):
         """Initialize instance variables."""
         utils.output_log(MSG.DRIVER_INITIALIZATION_START,
@@ -90,12 +96,23 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
         super(HBSDISCSIDriver, self).__init__(*args, **kwargs)

         self.configuration.append_config_values(common.COMMON_VOLUME_OPTS)
+        self.configuration.append_config_values(common.COMMON_PAIR_OPTS)
         self.configuration.append_config_values(common.COMMON_NAME_OPTS)
+        self.configuration.append_config_values(
+            replication.COMMON_MIRROR_OPTS)
         os.environ['LANG'] = 'C'
-        self.common = self._init_common(self.configuration, kwargs.get('db'))
-
-    def _init_common(self, conf, db):
-        return rest_iscsi.HBSDRESTISCSI(conf, _DRIVER_INFO, db)
+        kwargs.setdefault('driver_info', _DRIVER_INFO)
+        self.driver_info = dict(kwargs['driver_info'])
+        self.driver_info['driver_class'] = self.__class__
+        if self.configuration.safe_get('hitachi_mirror_storage_id'):
+            self.common = replication.HBSDREPLICATION(
+                self.configuration, self.driver_info, kwargs.get('db'))
+        elif not hasattr(self, '_init_common'):
+            self.common = self.driver_info['driver_impl_class'](
+                self.configuration, self.driver_info, kwargs.get('db'))
+        else:
+            self.common = self._init_common(
+                self.configuration, kwargs.get('db'))

     @staticmethod
     def get_driver_options():
@@ -105,8 +122,16 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
             ['driver_ssl_cert_verify', 'driver_ssl_cert_path',
              'san_api_port', ]))
         return (common.COMMON_VOLUME_OPTS +
+                common.COMMON_PAIR_OPTS +
                 common.COMMON_NAME_OPTS +
                 rest.REST_VOLUME_OPTS +
+                rest.REST_PAIR_OPTS +
+                replication._REP_OPTS +
+                replication.COMMON_MIRROR_OPTS +
+                replication.ISCSI_MIRROR_OPTS +
+                replication.REST_MIRROR_OPTS +
+                replication.REST_MIRROR_API_OPTS +
+                replication.REST_MIRROR_SSL_OPTS +
                 additional_opts)

     def check_for_setup_error(self):
@@ -183,7 +208,7 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
     @volume_utils.trace
     def manage_existing_get_size(self, volume, existing_ref):
         """Return the size[GB] of the specified volume."""
-        return self.common.manage_existing_get_size(existing_ref)
+        return self.common.manage_existing_get_size(volume, existing_ref)

     @volume_utils.trace
     def unmanage(self, volume):
cinder/volume/drivers/hitachi/hbsd_replication.py (new file, 972 lines)
@@ -0,0 +1,972 @@
# Copyright (C) 2022, 2023, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""replication module for Hitachi HBSD Driver."""

import json

from eventlet import greenthread
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils

from cinder import exception
from cinder.volume.drivers.hitachi import hbsd_common as common
from cinder.volume.drivers.hitachi import hbsd_rest as rest
from cinder.volume.drivers.hitachi import hbsd_utils as utils
from cinder.zonemanager import utils as fczm_utils

_REP_STATUS_CHECK_SHORT_INTERVAL = 5
_REP_STATUS_CHECK_LONG_INTERVAL = 10 * 60
_REP_STATUS_CHECK_TIMEOUT = 24 * 60 * 60

_WAIT_PAIR = 1
_WAIT_PSUS = 2

_REP_OPTS = [
    cfg.IntOpt(
        'hitachi_replication_status_check_short_interval',
        default=_REP_STATUS_CHECK_SHORT_INTERVAL,
        help='Initial interval at which remote replication pair status is '
             'checked'),
    cfg.IntOpt(
        'hitachi_replication_status_check_long_interval',
        default=_REP_STATUS_CHECK_LONG_INTERVAL,
        help='Interval at which remote replication pair status is checked. '
             'This parameter is applied if the status has not changed to the '
             'expected status after the time indicated by this parameter has '
             'elapsed.'),
    cfg.IntOpt(
        'hitachi_replication_status_check_timeout',
        default=_REP_STATUS_CHECK_TIMEOUT,
        help='Maximum wait time before the remote replication pair status '
             'changes to the expected status'),
    cfg.IntOpt(
        'hitachi_path_group_id',
        default=0, min=0, max=255,
        help='Path group ID assigned to the remote connection for remote '
             'replication'),
    cfg.IntOpt(
        'hitachi_quorum_disk_id',
        min=0, max=31,
        help='ID of the Quorum disk used for global-active device'),
    cfg.IntOpt(
        'hitachi_replication_copy_speed',
        min=1, max=15, default=3,
        help='Remote copy speed of storage system. 1 or 2 indicates '
             'low speed, 3 indicates middle speed, and a value between 4 and '
             '15 indicates high speed.'),
    cfg.BoolOpt(
        'hitachi_set_mirror_reserve_attribute',
        default=True,
        help='Whether or not to set the mirror reserve attribute'),
    cfg.IntOpt(
        'hitachi_replication_number',
        default=0, min=0, max=255,
        help='Instance number for REST API'),
]

COMMON_MIRROR_OPTS = [
    cfg.StrOpt(
        'hitachi_mirror_storage_id',
        default=None,
        help='ID of secondary storage system'),
    cfg.StrOpt(
        'hitachi_mirror_pool',
        default=None,
        help='Pool of secondary storage system'),
    cfg.StrOpt(
        'hitachi_mirror_snap_pool',
        default=None,
        help='Thin pool of secondary storage system'),
    cfg.StrOpt(
        'hitachi_mirror_ldev_range',
        default=None,
        help='Logical device range of secondary storage system'),
    cfg.ListOpt(
        'hitachi_mirror_target_ports',
        default=[],
        help='Target port names for host group or iSCSI target'),
    cfg.ListOpt(
        'hitachi_mirror_compute_target_ports',
        default=[],
        help=(
            'Target port names of compute node '
            'for host group or iSCSI target')),
    cfg.IntOpt(
        'hitachi_mirror_pair_target_number',
        min=0, max=99, default=0,
        help='Pair target name of the host group or iSCSI target'),
]

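Together with the iSCSI, REST and SSL groups defined below, these options describe the secondary array of a GAD pair. A rough sketch of the values an operator might set for one backend; the keys are the option names defined above, and every value here is invented purely for illustration:

```python
# Illustrative only -- these correspond to cinder.conf keys from
# COMMON_MIRROR_OPTS above; none of the values are real.
example_gad_secondary = {
    'hitachi_mirror_storage_id': '823456',          # serial of the secondary array
    'hitachi_mirror_pool': '0',                     # DP pool used for secondary volumes
    'hitachi_mirror_ldev_range': '2000-2999',       # LDEV IDs the driver may allocate
    'hitachi_mirror_target_ports': ['CL1-A'],       # controller-node attach ports
    'hitachi_mirror_compute_target_ports': ['CL2-A'],
    'hitachi_mirror_pair_target_number': 0,
}
```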
ISCSI_MIRROR_OPTS = [
    cfg.BoolOpt(
        'hitachi_mirror_use_chap_auth',
        default=False,
        help='Whether or not to use iSCSI authentication'),
    cfg.StrOpt(
        'hitachi_mirror_auth_user',
        default=None,
        help='iSCSI authentication username'),
    cfg.StrOpt(
        'hitachi_mirror_auth_password',
        default=None,
        secret=True,
        help='iSCSI authentication password'),
]

REST_MIRROR_OPTS = [
    cfg.ListOpt(
        'hitachi_mirror_rest_pair_target_ports',
        default=[],
        help='Target port names for pair of the host group or iSCSI target'),
]

REST_MIRROR_API_OPTS = [
    cfg.StrOpt(
        'hitachi_mirror_rest_user',
        default=None,
        help='Username of secondary storage system for REST API'),
    cfg.StrOpt(
        'hitachi_mirror_rest_password',
        default=None,
        secret=True,
        help='Password of secondary storage system for REST API'),
    cfg.StrOpt(
        'hitachi_mirror_rest_api_ip',
        default=None,
        help='IP address of REST API server'),
    cfg.PortOpt(
        'hitachi_mirror_rest_api_port',
        default=443,
        help='Port number of REST API server'),
]

REST_MIRROR_SSL_OPTS = [
    cfg.BoolOpt('hitachi_mirror_ssl_cert_verify',
                default=False,
                help='If set to True the http client will validate the SSL '
                     'certificate of the backend endpoint.'),
    cfg.StrOpt('hitachi_mirror_ssl_cert_path',
               help='Can be used to specify a non default path to a '
                    'CA_BUNDLE file or directory with certificates of '
                    'trusted CAs, which will be used to validate the backend'),
]

CONF = cfg.CONF
CONF.register_opts(_REP_OPTS)
CONF.register_opts(COMMON_MIRROR_OPTS)
CONF.register_opts(ISCSI_MIRROR_OPTS)
CONF.register_opts(REST_MIRROR_OPTS)
CONF.register_opts(REST_MIRROR_API_OPTS)
CONF.register_opts(REST_MIRROR_SSL_OPTS)

LOG = logging.getLogger(__name__)

MSG = utils.HBSDMsg


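These secondary-session options are not consumed directly by the REST client. Further down in this module, `_replace_with_mirror_conf` copies them onto the standard session settings of the secondary backend's configuration before its REST client is set up. A condensed sketch of that mapping, names only, based on the assignments visible in that method:

```python
# Secondary (hitachi_mirror_*) option -> standard session option it replaces.
MIRROR_TO_SESSION_KEYS = {
    'hitachi_mirror_rest_user': 'san_login',
    'hitachi_mirror_rest_password': 'san_password',
    'hitachi_mirror_rest_api_ip': 'san_ip',
    'hitachi_mirror_rest_api_port': 'san_api_port',
    'hitachi_mirror_ssl_cert_verify': 'driver_ssl_cert_verify',
    'hitachi_mirror_ssl_cert_path': 'driver_ssl_cert_path',
}
```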
def _pack_rep_provider_location(pldev=None, sldev=None, rep_type=None):
    provider_location = {}
    if pldev is not None:
        provider_location['pldev'] = pldev
    if sldev is not None:
        provider_location['sldev'] = sldev
    if rep_type is not None:
        provider_location['remote-copy'] = rep_type
    return json.dumps(provider_location)


def _delays(short_interval, long_interval, timeout):
    start_time = timeutils.utcnow()
    watch = timeutils.StopWatch()
    i = 0
    while True:
        watch.restart()
        yield i
        if utils.timed_out(start_time, timeout):
            raise StopIteration()
        watch.stop()
        interval = long_interval if utils.timed_out(
            start_time, long_interval) else short_interval
        idle = max(interval - watch.elapsed(), 0)
        greenthread.sleep(idle)
        i += 1


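A short usage sketch of these two helpers, assuming nothing beyond what is defined above; the LDEV numbers, the replication-type string and the `check_pair_status` helper are all invented for illustration:

```python
# provider_location stored for a replicated volume (illustrative values).
print(_pack_rep_provider_location(pldev=1025, sldev=2049, rep_type='GAD'))
# -> {"pldev": 1025, "sldev": 2049, "remote-copy": "GAD"}

# Pair-status polling paced by _delays(): poll every short_interval seconds at
# first, switch to long_interval once that much time has elapsed, and keep
# going until the timeout expires.
for attempt in _delays(short_interval=5, long_interval=600, timeout=86400):
    status = check_pair_status()     # hypothetical status lookup
    if status in ('PAIR', 'PFUL'):
        break
```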
class HBSDREPLICATION(rest.HBSDREST):

    def __init__(self, conf, driverinfo, db):
        super(HBSDREPLICATION, self).__init__(conf, driverinfo, db)
        conf.append_config_values(_REP_OPTS)
        if driverinfo['proto'] == 'iSCSI':
            conf.append_config_values(ISCSI_MIRROR_OPTS)
        conf.append_config_values(REST_MIRROR_OPTS)
        conf.append_config_values(REST_MIRROR_API_OPTS)
        conf.append_config_values(REST_MIRROR_SSL_OPTS)
        driver_impl_class = self.driver_info['driver_impl_class']
        self.primary = driver_impl_class(conf, driverinfo, db)
        self.rep_primary = self.primary
        self.rep_primary.is_primary = True
        self.rep_primary.storage_id = conf.safe_get(
            self.driver_info['param_prefix'] + '_storage_id') or ''
        self.primary_storage_id = self.rep_primary.storage_id
        self.secondary = driver_impl_class(conf, driverinfo, db)
        self.rep_secondary = self.secondary
        self.rep_secondary.is_secondary = True
        self.rep_secondary.storage_id = (
            conf.safe_get(
                self.driver_info['param_prefix'] + '_mirror_storage_id') or '')
        self.secondary_storage_id = self.rep_secondary.storage_id
        self.instances = self.rep_primary, self.rep_secondary
        self._LDEV_NAME = self.driver_info['driver_prefix'] + '-LDEV-%d-%d'

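The constructor builds two instances of the protocol-specific backend class, one flagged as primary and one as secondary, and later methods in this class fan operations out to both of them. A condensed sketch of the pattern those methods use for paired volumes (this helper is not part of the class, it only illustrates the shape of the calls):

```python
# Run the secondary-side call in a greenthread, do the primary-side call
# inline, then wait for the secondary before returning.
def _run_on_both(primary_call, secondary_call):
    thread = greenthread.spawn(secondary_call)
    try:
        primary_call()
    finally:
        thread.wait()
```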
|
def update_mirror_conf(self, conf, opts):
|
||||||
|
for opt in opts:
|
||||||
|
name = opt.name.replace('hitachi_mirror_', 'hitachi_')
|
||||||
|
try:
|
||||||
|
setattr(conf, name, getattr(conf, opt.name))
|
||||||
|
except Exception:
|
||||||
|
with excutils.save_and_reraise_exception():
|
||||||
|
self.rep_secondary.output_log(
|
||||||
|
MSG.INVALID_PARAMETER, param=opt.name)
|
||||||
|
|
||||||
|
def _replace_with_mirror_conf(self):
|
||||||
|
conf = self.conf
|
||||||
|
new_conf = utils.Config(conf)
|
||||||
|
self.rep_secondary.conf = new_conf
|
||||||
|
self.update_mirror_conf(new_conf, COMMON_MIRROR_OPTS)
|
||||||
|
self.update_mirror_conf(new_conf, REST_MIRROR_OPTS)
|
||||||
|
if self.rep_secondary.driver_info['volume_type'] == 'iscsi':
|
||||||
|
self.update_mirror_conf(new_conf, ISCSI_MIRROR_OPTS)
|
||||||
|
new_conf.san_login = (
|
||||||
|
conf.safe_get(self.driver_info['param_prefix'] +
|
||||||
|
'_mirror_rest_user'))
|
||||||
|
new_conf.san_password = (
|
||||||
|
conf.safe_get(self.driver_info['param_prefix'] +
|
||||||
|
'_mirror_rest_password'))
|
||||||
|
new_conf.san_ip = (
|
||||||
|
conf.safe_get(self.driver_info['param_prefix'] +
|
||||||
|
'_mirror_rest_api_ip'))
|
||||||
|
new_conf.san_api_port = (
|
||||||
|
conf.safe_get(self.driver_info['param_prefix'] +
|
||||||
|
'_mirror_rest_api_port'))
|
||||||
|
new_conf.driver_ssl_cert_verify = (
|
||||||
|
conf.safe_get(self.driver_info['param_prefix'] +
|
||||||
|
'_mirror_ssl_cert_verify'))
|
||||||
|
new_conf.driver_ssl_cert_path = (
|
||||||
|
conf.safe_get(self.driver_info['param_prefix'] +
|
||||||
|
'_mirror_ssl_cert_path'))
|
||||||
|
|
||||||
|
def do_setup(self, context):
|
||||||
|
"""Prepare for the startup of the driver."""
|
||||||
|
self.rep_primary = self.primary
|
||||||
|
self.rep_secondary = self.secondary
|
||||||
|
self.ctxt = context
|
||||||
|
try:
|
||||||
|
self.rep_primary.do_setup(context)
|
||||||
|
self.client = self.rep_primary.client
|
||||||
|
except Exception:
|
||||||
|
self.rep_primary.output_log(
|
||||||
|
MSG.SITE_INITIALIZATION_FAILED, site='primary')
|
||||||
|
self.rep_primary = None
|
||||||
|
try:
|
||||||
|
self._replace_with_mirror_conf()
|
||||||
|
self.rep_secondary.do_setup(context)
|
||||||
|
except Exception:
|
||||||
|
self.rep_secondary.output_log(
|
||||||
|
MSG.SITE_INITIALIZATION_FAILED, site='secondary')
|
||||||
|
if not self.rep_primary:
|
||||||
|
raise
|
||||||
|
self.rep_secondary = None
|
||||||
|
|
||||||
|
def update_volume_stats(self):
|
||||||
|
"""Update properties, capabilities and current states of the driver."""
|
||||||
|
if self.rep_primary:
|
||||||
|
data = self.rep_primary.update_volume_stats()
|
||||||
|
else:
|
||||||
|
data = self.rep_secondary.update_volume_stats()
|
||||||
|
return data
|
||||||
|
|
||||||
|
def _require_rep_primary(self):
|
||||||
|
if not self.rep_primary:
|
||||||
|
msg = utils.output_log(
|
||||||
|
MSG.SITE_NOT_INITIALIZED, storage_id=self.primary_storage_id,
|
||||||
|
site='primary')
|
||||||
|
self.raise_error(msg)
|
||||||
|
|
||||||
|
def _require_rep_secondary(self):
|
||||||
|
if not self.rep_secondary:
|
||||||
|
msg = utils.output_log(
|
||||||
|
MSG.SITE_NOT_INITIALIZED, storage_id=self.secondary_storage_id,
|
||||||
|
site='secondary')
|
||||||
|
self.raise_error(msg)
|
||||||

    def _is_mirror_spec(self, extra_specs):
        if not extra_specs:
            return False
        topology = extra_specs.get(
            self.driver_info['driver_dir_name'] + ':topology')
        if topology is None:
            return False
        elif topology == 'active_active_mirror_volume':
            return True
        else:
            msg = self.rep_primary.output_log(
                MSG.INVALID_EXTRA_SPEC_KEY,
                key=self.driver_info['driver_dir_name'] + ':topology',
                value=topology)
            self.raise_error(msg)

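The extra spec key checked here is built from the driver's `driver_dir_name` entry, so for this driver the volume type that enables GAD presumably carries the spec shown below; any other value for the key makes the method above log INVALID_EXTRA_SPEC_KEY and raise:

```python
# Volume type extra specs that route a volume to GAD, assuming the driver's
# driver_dir_name is 'hbsd'.
gad_type_extra_specs = {
    'hbsd:topology': 'active_active_mirror_volume',
}
```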
|
def _create_rep_ldev(self, volume, rep_type, pvol=None):
|
||||||
|
"""Create a primary volume and a secondary volume."""
|
||||||
|
pool_id = self.rep_secondary.storage_info['pool_id'][0]
|
||||||
|
ldev_range = self.rep_secondary.storage_info['ldev_range']
|
||||||
|
thread = greenthread.spawn(
|
||||||
|
self.rep_secondary.create_ldev, volume.size, pool_id, ldev_range)
|
||||||
|
if pvol is None:
|
||||||
|
try:
|
||||||
|
pool_id = self.rep_primary.get_pool_id_of_volume(volume)
|
||||||
|
ldev_range = self.rep_primary.storage_info['ldev_range']
|
||||||
|
pvol = self.rep_primary.create_ldev(volume.size,
|
||||||
|
pool_id, ldev_range)
|
||||||
|
except exception.VolumeDriverException:
|
||||||
|
self.rep_primary.output_log(MSG.CREATE_LDEV_FAILED)
|
||||||
|
try:
|
||||||
|
svol = thread.wait()
|
||||||
|
except Exception:
|
||||||
|
self.rep_secondary.output_log(MSG.CREATE_LDEV_FAILED)
|
||||||
|
svol = None
|
||||||
|
if pvol is None or svol is None:
|
||||||
|
for vol, type_, instance in zip((pvol, svol), ('P-VOL', 'S-VOL'),
|
||||||
|
self.instances):
|
||||||
|
if vol is None:
|
||||||
|
msg = instance.output_log(
|
||||||
|
MSG.CREATE_REPLICATION_VOLUME_FAILED,
|
||||||
|
type=type_, rep_type=rep_type,
|
||||||
|
volume_id=volume.id,
|
||||||
|
volume_type=volume.volume_type.name, size=volume.size)
|
||||||
|
else:
|
||||||
|
instance.delete_ldev(vol)
|
||||||
|
self.raise_error(msg)
|
||||||
|
thread = greenthread.spawn(
|
||||||
|
self.rep_secondary.modify_ldev_name,
|
||||||
|
svol, volume['id'].replace("-", ""))
|
||||||
|
try:
|
||||||
|
self.rep_primary.modify_ldev_name(
|
||||||
|
pvol, volume['id'].replace("-", ""))
|
||||||
|
finally:
|
||||||
|
thread.wait()
|
||||||
|
return pvol, svol
|
||||||
|
|
||||||
|
def _create_rep_copy_group_name(self, ldev):
|
||||||
|
return self.driver_info['target_prefix'] + '%s%02XU%02d' % (
|
||||||
|
CONF.my_ip, self.conf.hitachi_replication_number, ldev >> 10)
|
||||||
|
|
||||||
|
def _get_rep_copy_speed(self):
|
||||||
|
rep_copy_speed = self.rep_primary.conf.safe_get(
|
||||||
|
self.driver_info['param_prefix'] + '_replication_copy_speed')
|
||||||
|
if rep_copy_speed:
|
||||||
|
return rep_copy_speed
|
||||||
|
else:
|
||||||
|
return self.rep_primary.conf.hitachi_copy_speed
|
||||||
|
|
||||||
|
def _get_wait_pair_status_change_params(self, wait_type):
|
||||||
|
"""Get a replication pair status information."""
|
||||||
|
_wait_pair_status_change_params = {
|
||||||
|
_WAIT_PAIR: {
|
||||||
|
'instance': self.rep_primary,
|
||||||
|
'remote_client': self.rep_secondary.client,
|
||||||
|
'is_secondary': False,
|
||||||
|
'transitional_status': ['COPY'],
|
||||||
|
'expected_status': ['PAIR', 'PFUL'],
|
||||||
|
'msgid': MSG.CREATE_REPLICATION_PAIR_FAILED,
|
||||||
|
'status_keys': ['pvolStatus', 'svolStatus'],
|
||||||
|
},
|
||||||
|
_WAIT_PSUS: {
|
||||||
|
'instance': self.rep_primary,
|
||||||
|
'remote_client': self.rep_secondary.client,
|
||||||
|
'is_secondary': False,
|
||||||
|
'transitional_status': ['PAIR', 'PFUL'],
|
||||||
|
'expected_status': ['PSUS', 'SSUS'],
|
||||||
|
'msgid': MSG.SPLIT_REPLICATION_PAIR_FAILED,
|
||||||
|
'status_keys': ['pvolStatus', 'svolStatus'],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return _wait_pair_status_change_params[wait_type]
|
||||||
|
|
||||||
|
def _wait_pair_status_change(self, copy_group_name, pvol, svol,
|
||||||
|
rep_type, wait_type):
|
||||||
|
"""Wait until the replication pair status changes to the specified
|
||||||
|
|
||||||
|
status.
|
||||||
|
"""
|
||||||
|
for _ in _delays(
|
||||||
|
self.conf.hitachi_replication_status_check_short_interval,
|
||||||
|
self.conf.hitachi_replication_status_check_long_interval,
|
||||||
|
self.conf.hitachi_replication_status_check_timeout):
|
||||||
|
params = self._get_wait_pair_status_change_params(wait_type)
|
||||||
|
status = params['instance'].client.get_remote_copypair(
|
||||||
|
params['remote_client'], copy_group_name, pvol, svol,
|
||||||
|
is_secondary=params['is_secondary'])
|
||||||
|
statuses = [status.get(status_key) for status_key in
|
||||||
|
params['status_keys']]
|
||||||
|
unexpected_status_set = (set(statuses) -
|
||||||
|
set(params['expected_status']))
|
||||||
|
if not unexpected_status_set:
|
||||||
|
break
|
||||||
|
if unexpected_status_set.issubset(
|
||||||
|
set(params['transitional_status'])):
|
||||||
|
continue
|
||||||
|
msg = params['instance'].output_log(
|
||||||
|
params['msgid'], rep_type=rep_type, pvol=pvol, svol=svol,
|
||||||
|
copy_group=copy_group_name, status='/'.join(statuses))
|
||||||
|
self.raise_error(msg)
|
||||||
|
else:
|
||||||
|
status = params['instance'].client.get_remote_copypair(
|
||||||
|
params['remote_client'], copy_group_name, pvol, svol,
|
||||||
|
is_secondary=params['is_secondary'])
|
||||||
|
msg = params['instance'].output_log(
|
||||||
|
MSG.PAIR_CHANGE_TIMEOUT,
|
||||||
|
rep_type=rep_type, pvol=pvol, svol=svol,
|
||||||
|
copy_group=copy_group_name, current_status='/'.join(statuses),
|
||||||
|
expected_status=str(params['expected_status']),
|
||||||
|
timeout=self.conf.hitachi_replication_status_check_timeout)
|
||||||
|
self.raise_error(msg)
|
||||||
|
|
||||||
|
def _create_rep_pair(self, volume, pvol, svol, rep_type,
|
||||||
|
do_initialcopy=True):
|
||||||
|
"""Create a replication pair."""
|
||||||
|
copy_group_name = self._create_rep_copy_group_name(pvol)
|
||||||
|
|
||||||
|
@utils.synchronized_on_copy_group()
|
||||||
|
def inner(self, remote_client, copy_group_name, secondary_storage_id,
|
||||||
|
conf, copyPace, parent):
|
||||||
|
is_new_copy_grp = True
|
||||||
|
result = self.get_remote_copy_grps(remote_client)
|
||||||
|
if result:
|
||||||
|
for data in result:
|
||||||
|
if copy_group_name == data['copyGroupName']:
|
||||||
|
is_new_copy_grp = False
|
||||||
|
break
|
||||||
|
body = {
|
||||||
|
'copyGroupName': copy_group_name,
|
||||||
|
'copyPairName': parent._LDEV_NAME % (pvol, svol),
|
||||||
|
'replicationType': rep_type,
|
||||||
|
'remoteStorageDeviceId': secondary_storage_id,
|
||||||
|
'pvolLdevId': pvol,
|
||||||
|
'svolLdevId': svol,
|
||||||
|
'pathGroupId': conf.hitachi_path_group_id,
|
||||||
|
'localDeviceGroupName': copy_group_name + 'P',
|
||||||
|
'remoteDeviceGroupName': copy_group_name + 'S',
|
||||||
|
'isNewGroupCreation': is_new_copy_grp,
|
||||||
|
'doInitialCopy': do_initialcopy,
|
||||||
|
'isDataReductionForceCopy': False
|
||||||
|
}
|
||||||
|
if rep_type == parent.driver_info['mirror_attr']:
|
||||||
|
body['quorumDiskId'] = conf.hitachi_quorum_disk_id
|
||||||
|
body['copyPace'] = copyPace
|
||||||
|
if is_new_copy_grp:
|
||||||
|
body['muNumber'] = 0
|
||||||
|
self.add_remote_copypair(remote_client, body)
|
||||||
|
|
||||||
|
inner(
|
||||||
|
self.rep_primary.client, self.rep_secondary.client,
|
||||||
|
copy_group_name, self.rep_secondary.storage_id,
|
||||||
|
self.rep_secondary.conf, self._get_rep_copy_speed(),
|
||||||
|
self)
|
||||||
|
self._wait_pair_status_change(
|
||||||
|
copy_group_name, pvol, svol, rep_type, _WAIT_PAIR)
|
||||||
|
|
||||||
|
def _create_rep_ldev_and_pair(
|
||||||
|
self, volume, rep_type, pvol=None):
|
||||||
|
"""Create volume and Replication pair."""
|
||||||
|
svol = None
|
||||||
|
pvol, svol = self._create_rep_ldev(volume, rep_type, pvol)
|
||||||
|
try:
|
||||||
|
thread = greenthread.spawn(
|
||||||
|
self.rep_secondary.initialize_pair_connection, svol)
|
||||||
|
try:
|
||||||
|
self.rep_primary.initialize_pair_connection(pvol)
|
||||||
|
finally:
|
||||||
|
thread.wait()
|
||||||
|
if self.rep_primary.conf.\
|
||||||
|
hitachi_set_mirror_reserve_attribute:
|
||||||
|
self.rep_secondary.client.assign_virtual_ldevid(svol)
|
||||||
|
self._create_rep_pair(volume, pvol, svol, rep_type)
|
||||||
|
except Exception:
|
||||||
|
with excutils.save_and_reraise_exception():
|
||||||
|
if svol is not None:
|
||||||
|
self.rep_secondary.terminate_pair_connection(svol)
|
||||||
|
if self.rep_primary.conf.\
|
||||||
|
hitachi_set_mirror_reserve_attribute:
|
||||||
|
self.rep_secondary.client.unassign_virtual_ldevid(
|
||||||
|
svol)
|
||||||
|
self.rep_secondary.delete_ldev(svol)
|
||||||
|
if pvol is not None:
|
||||||
|
self.rep_primary.terminate_pair_connection(pvol)
|
||||||
|
self.rep_primary.delete_ldev(pvol)
|
||||||
|
return pvol, svol
|
||||||
|
|
||||||
|
def create_volume(self, volume):
|
||||||
|
"""Create a volume from a volume or snapshot and return its properties.
|
||||||
|
|
||||||
|
"""
|
||||||
|
self._require_rep_primary()
|
||||||
|
extra_specs = self.rep_primary.get_volume_extra_specs(volume)
|
||||||
|
if self._is_mirror_spec(extra_specs):
|
||||||
|
self._require_rep_secondary()
|
||||||
|
rep_type = self.driver_info['mirror_attr']
|
||||||
|
pldev, sldev = self._create_rep_ldev_and_pair(
|
||||||
|
volume, rep_type)
|
||||||
|
provider_location = _pack_rep_provider_location(
|
||||||
|
pldev, sldev, rep_type)
|
||||||
|
return {
|
||||||
|
'provider_location': provider_location
|
||||||
|
}
|
||||||
|
return self.rep_primary.create_volume(volume)
|
||||||
|
|
||||||
|
def _has_rep_pair(self, ldev):
|
||||||
|
ldev_info = self.rep_primary.get_ldev_info(
|
||||||
|
['status', 'attributes'], ldev)
|
||||||
|
return (ldev_info['status'] == rest.NORMAL_STS and
|
||||||
|
self.driver_info['mirror_attr'] in ldev_info['attributes'])
|
||||||
|
|
||||||
|
def _get_rep_pair_info(self, pldev):
|
||||||
|
"""Return replication pair info."""
|
||||||
|
pair_info = {}
|
||||||
|
if not self._has_rep_pair(pldev):
|
||||||
|
return pair_info
|
||||||
|
self._require_rep_secondary()
|
||||||
|
copy_group_name = self._create_rep_copy_group_name(pldev)
|
||||||
|
pairs = self.rep_primary.client.get_remote_copy_grp(
|
||||||
|
self.rep_secondary.client,
|
||||||
|
copy_group_name).get('copyPairs', [])
|
||||||
|
for pair in pairs:
|
||||||
|
if (pair.get('replicationType') in
|
||||||
|
[self.driver_info['mirror_attr']] and
|
||||||
|
pair['pvolLdevId'] == pldev):
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
return pair_info
|
||||||
|
pair_info['pvol'] = pldev
|
||||||
|
pair_info['svol_info'] = [{
|
||||||
|
'ldev': pair.get('svolLdevId'),
|
||||||
|
'rep_type': pair.get('replicationType'),
|
||||||
|
'is_psus': pair.get('svolStatus') in ['SSUS', 'PFUS'],
|
||||||
|
'pvol_status': pair.get('pvolStatus'),
|
||||||
|
'svol_status': pair.get('svolStatus')}]
|
||||||
|
return pair_info
|
||||||
|
|
||||||
|
def _split_rep_pair(self, pvol, svol):
|
||||||
|
copy_group_name = self._create_rep_copy_group_name(pvol)
|
||||||
|
rep_type = self.driver_info['mirror_attr']
|
||||||
|
self.rep_primary.client.split_remote_copypair(
|
||||||
|
self.rep_secondary.client, copy_group_name, pvol, svol, rep_type)
|
||||||
|
self._wait_pair_status_change(
|
||||||
|
copy_group_name, pvol, svol, rep_type, _WAIT_PSUS)
|
||||||
|
|
||||||
|
def _delete_rep_pair(self, pvol, svol):
|
||||||
|
"""Delete a replication pair."""
|
||||||
|
copy_group_name = self._create_rep_copy_group_name(pvol)
|
||||||
|
self._split_rep_pair(pvol, svol)
|
||||||
|
self.rep_primary.client.delete_remote_copypair(
|
||||||
|
self.rep_secondary.client, copy_group_name, pvol, svol)
|
||||||
|
|
||||||
|
def delete_volume(self, volume):
|
||||||
|
"""Delete the specified volume."""
|
||||||
|
self._require_rep_primary()
|
||||||
|
ldev = self.rep_primary.get_ldev(volume)
|
||||||
|
if ldev is None:
|
||||||
|
self.rep_primary.output_log(
|
||||||
|
MSG.INVALID_LDEV_FOR_DELETION, method='delete_volume',
|
||||||
|
id=volume.id)
|
||||||
|
return
|
||||||
|
pair_info = self._get_rep_pair_info(ldev)
|
||||||
|
if pair_info:
|
||||||
|
self._delete_rep_pair(
|
||||||
|
pair_info['pvol'], pair_info['svol_info'][0]['ldev'])
|
||||||
|
thread = greenthread.spawn(
|
||||||
|
self.rep_secondary.delete_volume, volume)
|
||||||
|
try:
|
||||||
|
self.rep_primary.delete_volume(volume)
|
||||||
|
finally:
|
||||||
|
thread.wait()
|
||||||
|
else:
|
||||||
|
self.rep_primary.delete_volume(volume)
|
||||||
|
|
||||||
|
def delete_ldev(self, ldev):
|
||||||
|
self._require_rep_primary()
|
||||||
|
pair_info = self._get_rep_pair_info(ldev)
|
||||||
|
if pair_info:
|
||||||
|
self._delete_rep_pair(ldev, pair_info['svol_info'][0]['ldev'])
|
||||||
|
th = greenthread.spawn(self.rep_secondary.delete_ldev,
|
||||||
|
pair_info['svol_info'][0]['ldev'])
|
||||||
|
try:
|
||||||
|
self.rep_primary.delete_ldev(ldev)
|
||||||
|
finally:
|
||||||
|
th.wait()
|
||||||
|
else:
|
||||||
|
self.rep_primary.delete_ldev(ldev)
|
||||||
|
|
||||||
|
def _create_rep_volume_from_src(self, volume, src, src_type, operation):
|
||||||
|
"""Create a replication volume from a volume or snapshot and return
|
||||||
|
|
||||||
|
its properties.
|
||||||
|
"""
|
||||||
|
rep_type = self.driver_info['mirror_attr']
|
||||||
|
data = self.rep_primary.create_volume_from_src(
|
||||||
|
volume, src, src_type, is_rep=True)
|
||||||
|
new_ldev = self.rep_primary.get_ldev(data)
|
||||||
|
sldev = self._create_rep_ldev_and_pair(
|
||||||
|
volume, rep_type, new_ldev)[1]
|
||||||
|
provider_location = _pack_rep_provider_location(
|
||||||
|
new_ldev, sldev, rep_type)
|
||||||
|
return {
|
||||||
|
'provider_location': provider_location,
|
||||||
|
}
|
||||||
|
|
||||||
|
def _create_volume_from_src(self, volume, src, src_type):
|
||||||
|
"""Create a volume from a volume or snapshot and return its properties.
|
||||||
|
|
||||||
|
"""
|
||||||
|
self._require_rep_primary()
|
||||||
|
operation = ('create a volume from a %s' % src_type)
|
||||||
|
extra_specs = self.rep_primary.get_volume_extra_specs(volume)
|
||||||
|
if self._is_mirror_spec(extra_specs):
|
||||||
|
self._require_rep_secondary()
|
||||||
|
return self._create_rep_volume_from_src(
|
||||||
|
volume, src, src_type, operation)
|
||||||
|
return self.rep_primary.create_volume_from_src(volume, src, src_type)
|
||||||
|
|
||||||
|
def create_cloned_volume(self, volume, src_vref):
|
||||||
|
"""Create a clone of the specified volume and return its properties."""
|
||||||
|
return self._create_volume_from_src(
|
||||||
|
volume, src_vref, common.STR_VOLUME)
|
||||||
|
|
||||||
|
def create_volume_from_snapshot(self, volume, snapshot):
|
||||||
|
"""Create a volume from a snapshot and return its properties."""
|
||||||
|
return self._create_volume_from_src(
|
||||||
|
volume, snapshot, common.STR_SNAPSHOT)
|
||||||
|
|
||||||
|
def create_snapshot(self, snapshot):
|
||||||
|
"""Create a snapshot from a volume and return its properties."""
|
||||||
|
self._require_rep_primary()
|
||||||
|
return self.rep_primary.create_snapshot(snapshot)
|
||||||
|
|
||||||
|
def delete_snapshot(self, snapshot):
|
||||||
|
"""Delete the specified snapshot."""
|
||||||
|
self._require_rep_primary()
|
||||||
|
self.rep_primary.delete_snapshot(snapshot)
|
||||||
|
|
||||||
|
def _get_remote_copy_mode(self, vol):
|
||||||
|
provider_location = vol.get('provider_location')
|
||||||
|
if not provider_location:
|
||||||
|
return None
|
||||||
|
if provider_location.startswith('{'):
|
||||||
|
loc = json.loads(provider_location)
|
||||||
|
if isinstance(loc, dict):
|
||||||
|
return loc.get('remote-copy')
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _merge_properties(self, prop1, prop2):
|
||||||
|
if prop1 is None:
|
||||||
|
if prop2 is None:
|
||||||
|
return []
|
||||||
|
return prop2
|
||||||
|
elif prop2 is None:
|
||||||
|
return prop1
|
||||||
|
d = dict(prop1)
|
||||||
|
for key in ('target_luns', 'target_wwn', 'target_portals',
|
||||||
|
'target_iqns'):
|
||||||
|
if key in d:
|
||||||
|
d[key] = d[key] + prop2[key]
|
||||||
|
if 'initiator_target_map' in d:
|
||||||
|
for key2 in d['initiator_target_map']:
|
||||||
|
d['initiator_target_map'][key2] = (
|
||||||
|
d['initiator_target_map'][key2]
|
||||||
|
+ prop2['initiator_target_map'][key2])
|
||||||
|
return d
|
||||||
|
|
||||||
|
def initialize_connection_mirror(self, volume, connector):
|
||||||
|
lun = None
|
||||||
|
prop1 = None
|
||||||
|
prop2 = None
|
||||||
|
if self.rep_primary:
|
||||||
|
try:
|
||||||
|
conn_info1 = (
|
||||||
|
self.rep_primary.initialize_connection(
|
||||||
|
volume, connector, is_mirror=True))
|
||||||
|
except Exception as ex:
|
||||||
|
self.rep_primary.output_log(
|
||||||
|
MSG.REPLICATION_VOLUME_OPERATION_FAILED,
|
||||||
|
operation='attach', type='P-VOL',
|
||||||
|
volume_id=volume.id, reason=str(ex))
|
||||||
|
else:
|
||||||
|
prop1 = conn_info1['data']
|
||||||
|
if self.driver_info['volume_type'] == 'fibre_channel':
|
||||||
|
if 'target_lun' in prop1:
|
||||||
|
lun = prop1['target_lun']
|
||||||
|
else:
|
||||||
|
lun = prop1['target_luns'][0]
|
||||||
|
if self.rep_secondary:
|
||||||
|
try:
|
||||||
|
conn_info2 = (
|
||||||
|
self.rep_secondary.initialize_connection(
|
||||||
|
volume, connector, lun=lun, is_mirror=True))
|
||||||
|
except Exception as ex:
|
||||||
|
self.rep_secondary.output_log(
|
||||||
|
MSG.REPLICATION_VOLUME_OPERATION_FAILED,
|
||||||
|
operation='attach', type='S-VOL',
|
||||||
|
volume_id=volume.id, reason=str(ex))
|
||||||
|
if prop1 is None:
|
||||||
|
raise ex
|
||||||
|
else:
|
||||||
|
prop2 = conn_info2['data']
|
||||||
|
conn_info = {
|
||||||
|
'driver_volume_type': self.driver_info['volume_type'],
|
||||||
|
'data': self._merge_properties(prop1, prop2),
|
||||||
|
}
|
||||||
|
return conn_info
|
||||||
|
|
||||||
|
def initialize_connection(self, volume, connector, is_snapshot=False):
|
||||||
|
"""Initialize connection between the server and the volume."""
|
||||||
|
if (self._get_remote_copy_mode(volume) ==
|
||||||
|
self.driver_info['mirror_attr']):
|
||||||
|
conn_info = self.initialize_connection_mirror(volume, connector)
|
||||||
|
if self.driver_info['volume_type'] == 'fibre_channel':
|
||||||
|
fczm_utils.add_fc_zone(conn_info)
|
||||||
|
return conn_info
|
||||||
|
else:
|
||||||
|
self._require_rep_primary()
|
||||||
|
return self.rep_primary.initialize_connection(
|
||||||
|
volume, connector, is_snapshot)
|
||||||
|
|
||||||
|
def terminate_connection_mirror(self, volume, connector):
|
||||||
|
prop1 = None
|
||||||
|
prop2 = None
|
||||||
|
if self.rep_primary:
|
||||||
|
try:
|
||||||
|
conn_info1 = self.rep_primary.terminate_connection(
|
||||||
|
volume, connector, is_mirror=True)
|
||||||
|
except Exception as ex:
|
||||||
|
self.rep_primary.output_log(
|
||||||
|
MSG.REPLICATION_VOLUME_OPERATION_FAILED,
|
||||||
|
operation='detach', type='P-VOL',
|
||||||
|
volume_id=volume.id, reason=str(ex))
|
||||||
|
raise ex
|
||||||
|
else:
|
||||||
|
if conn_info1:
|
||||||
|
prop1 = conn_info1['data']
|
||||||
|
if self.rep_secondary:
|
||||||
|
try:
|
||||||
|
conn_info2 = self.rep_secondary.terminate_connection(
|
||||||
|
volume, connector, is_mirror=True)
|
||||||
|
except Exception as ex:
|
||||||
|
self.rep_secondary.output_log(
|
||||||
|
MSG.REPLICATION_VOLUME_OPERATION_FAILED,
|
||||||
|
operation='detach', type='S-VOL',
|
||||||
|
volume_id=volume.id, reason=str(ex))
|
||||||
|
raise ex
|
||||||
|
else:
|
||||||
|
if conn_info2:
|
||||||
|
prop2 = conn_info2['data']
|
||||||
|
conn_info = {
|
||||||
|
'driver_volume_type': self.driver_info['volume_type'],
|
||||||
|
'data': self._merge_properties(prop1, prop2),
|
||||||
|
}
|
||||||
|
return conn_info
|
||||||
|
|
||||||
|
def terminate_connection(self, volume, connector):
|
||||||
|
"""Terminate connection between the server and the volume."""
|
||||||
|
if (self._get_remote_copy_mode(volume) ==
|
||||||
|
self.driver_info['mirror_attr']):
|
||||||
|
conn_info = self.terminate_connection_mirror(volume, connector)
|
||||||
|
if self.driver_info['volume_type'] == 'fibre_channel':
|
||||||
|
fczm_utils.remove_fc_zone(conn_info)
|
||||||
|
return conn_info
|
||||||
|
else:
|
||||||
|
self._require_rep_primary()
|
||||||
|
return self.rep_primary.terminate_connection(volume, connector)
|
||||||
|
|
||||||
|

    def _extend_pair_volume(self, volume, new_size, ldev, pair_info):
        """Extend the specified replication volume to the specified size."""
        rep_type = self.driver_info['mirror_attr']
        pvol_info = self.rep_primary.get_ldev_info(
            ['numOfPorts'], pair_info['pvol'])
        if pvol_info['numOfPorts'] > 1:
            msg = self.rep_primary.output_log(
                MSG.EXTEND_REPLICATION_VOLUME_ERROR,
                rep_type=rep_type, volume_id=volume.id, ldev=ldev,
                source_size=volume.size, destination_size=new_size,
                pvol=pair_info['pvol'], svol='',
                pvol_num_of_ports=pvol_info['numOfPorts'],
                svol_num_of_ports='')
            self.raise_error(msg)
        self._delete_rep_pair(
            ldev, pair_info['svol_info'][0]['ldev'])
        thread = greenthread.spawn(
            self.rep_secondary.extend_volume, volume, new_size)
        try:
            self.rep_primary.extend_volume(volume, new_size)
        finally:
            thread.wait()
        self._create_rep_pair(
            volume, pair_info['pvol'], pair_info['svol_info'][0]['ldev'],
            rep_type, do_initialcopy=False)

    def extend_volume(self, volume, new_size):
        """Extend the specified volume to the specified size."""
        self._require_rep_primary()
        ldev = self.rep_primary.get_ldev(volume)
        if ldev is None:
            msg = self.rep_primary.output_log(
                MSG.INVALID_LDEV_FOR_EXTENSION, volume_id=volume.id)
            self.raise_error(msg)
        pair_info = self._get_rep_pair_info(ldev)
        if pair_info:
            self._extend_pair_volume(volume, new_size, ldev, pair_info)
        else:
            self.rep_primary.extend_volume(volume, new_size)

    def manage_existing(self, volume, existing_ref):
        """Return volume properties which Cinder needs to manage the volume."""
        self._require_rep_primary()
        return self.rep_primary.manage_existing(volume, existing_ref)

    def manage_existing_get_size(self, volume, existing_ref):
        """Return the size[GB] of the specified volume."""
        self._require_rep_primary()
        return self.rep_primary.manage_existing_get_size(volume, existing_ref)

    def unmanage(self, volume):
        """Prepare the volume for removing it from Cinder management."""
        self._require_rep_primary()
        ldev = self.rep_primary.get_ldev(volume)
        if ldev is None:
            self.rep_primary.output_log(
                MSG.INVALID_LDEV_FOR_DELETION,
                method='unmanage', id=volume.id)
            return
        if self._has_rep_pair(ldev):
            msg = self.rep_primary.output_log(
                MSG.REPLICATION_PAIR_ERROR,
                operation='unmanage a volume', volume=volume.id,
                snapshot_info='', ldev=ldev)
            self.raise_error(msg)
        self.rep_primary.unmanage(volume)

    def discard_zero_page(self, volume):
        self._require_rep_primary()
        ldev = self.rep_primary.get_ldev(volume)
        if self._has_rep_pair(ldev):
            self._require_rep_secondary()
            th = greenthread.spawn(
                self.rep_secondary.discard_zero_page, volume)
            try:
                self.rep_primary.discard_zero_page(volume)
            finally:
                th.wait()
        else:
            self.rep_primary.discard_zero_page(volume)

    def unmanage_snapshot(self, snapshot):
        if not self.rep_primary:
            return self.rep_secondary.unmanage_snapshot(snapshot)
        else:
            return self.rep_primary.unmanage_snapshot(snapshot)

    def retype(self, ctxt, volume, new_type, diff, host):
        self._require_rep_primary()
        ldev = self.rep_primary.get_ldev(volume)
        if ldev is None:
            msg = self.rep_primary.output_log(
                MSG.INVALID_LDEV_FOR_VOLUME_COPY,
                type='volume', id=volume.id)
            self.raise_error(msg)
        if (self._has_rep_pair(ldev) or
                self._is_mirror_spec(new_type['extra_specs'])):
            return False
        return self.rep_primary.retype(
            ctxt, volume, new_type, diff, host)

    def migrate_volume(self, volume, host):
        self._require_rep_primary()
        ldev = self.rep_primary.get_ldev(volume)
        if ldev is None:
            msg = self.rep_primary.output_log(
                MSG.INVALID_LDEV_FOR_VOLUME_COPY,
                type='volume', id=volume.id)
            self.raise_error(msg)
        if self._get_rep_pair_info(ldev):
            return False, None
        else:
            return self.rep_primary.migrate_volume(volume, host)

    def _resync_rep_pair(self, pvol, svol):
        copy_group_name = self._create_rep_copy_group_name(pvol)
        rep_type = self.driver_info['mirror_attr']
        self.rep_primary.client.resync_remote_copypair(
            self.rep_secondary.client, copy_group_name, pvol, svol,
            rep_type, copy_speed=self._get_rep_copy_speed())
        self._wait_pair_status_change(
            copy_group_name, pvol, svol, rep_type, _WAIT_PAIR)

    def revert_to_snapshot(self, volume, snapshot):
        """Rollback the specified snapshot."""
        self._require_rep_primary()
        ldev = self.rep_primary.get_ldev(volume)
        svol = self.rep_primary.get_ldev(snapshot)
        if None in (ldev, svol):
            raise NotImplementedError()
        pair_info = self._get_rep_pair_info(ldev)
        is_snap = self.rep_primary.has_snap_pair(ldev, svol)
        if pair_info and is_snap:
            self._split_rep_pair(pair_info['pvol'],
                                 pair_info['svol_info'][0]['ldev'])
        try:
            self.rep_primary.revert_to_snapshot(volume, snapshot)
        finally:
            if pair_info and is_snap:
                self._resync_rep_pair(pair_info['pvol'],
                                      pair_info['svol_info'][0]['ldev'])

    def create_group(self):
        self._require_rep_primary()
        return self.rep_primary.create_group()

    def delete_group(self, group, volumes):
        self._require_rep_primary()
        return super(HBSDREPLICATION, self).delete_group(group, volumes)

    def create_group_from_src(
            self, context, group, volumes, snapshots=None, source_vols=None):
        self._require_rep_primary()
        return super(HBSDREPLICATION, self).create_group_from_src(
            context, group, volumes, snapshots, source_vols)

    def update_group(self, group, add_volumes=None):
        self._require_rep_primary()
        return self.rep_primary.update_group(group, add_volumes)

    def create_group_snapshot(self, context, group_snapshot, snapshots):
        self._require_rep_primary()
        return self.rep_primary.create_group_snapshot(
            context, group_snapshot, snapshots)

    def delete_group_snapshot(self, group_snapshot, snapshots):
        self._require_rep_primary()
        return self.rep_primary.delete_group_snapshot(
            group_snapshot, snapshots)

@@ -91,6 +91,8 @@ _MAX_COPY_GROUP_NAME = 29
 _MAX_CTG_COUNT_EXCEEDED_ADD_SNAPSHOT = ('2E10', '2302')
 _MAX_PAIR_COUNT_IN_CTG_EXCEEDED_ADD_SNAPSHOT = ('2E13', '9900')
 
+_PAIR_TARGET_NAME_BODY_DEFAULT = 'pair00'
+
 REST_VOLUME_OPTS = [
     cfg.BoolOpt(
         'hitachi_rest_disable_io_wait',
@@ -190,6 +192,13 @@ REST_VOLUME_OPTS = [
         help='Host mode option for host group or iSCSI target.'),
 ]
 
+REST_PAIR_OPTS = [
+    cfg.ListOpt(
+        'hitachi_rest_pair_target_ports',
+        default=[],
+        help='Target port names for pair of the host group or iSCSI target'),
+]
+
 _REQUIRED_REST_OPTS = [
     'san_login',
     'san_password',
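# Illustrative sketch (not part of this patch): hitachi_rest_pair_target_ports
# selects the ports on which the driver keeps its dedicated host group for
# GAD pair operations. A hypothetical backend section in cinder.conf might
# look like the following; the port names and driver path are examples only.
#
#     [hitachi-vsp-1]
#     volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver
#     hitachi_target_ports = CL1-A
#     hitachi_rest_pair_target_ports = CL1-B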
@@ -198,21 +207,26 @@ _REQUIRED_REST_OPTS = [
 
 CONF = cfg.CONF
 CONF.register_opts(REST_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP)
+CONF.register_opts(REST_PAIR_OPTS, group=configuration.SHARED_CONF_GROUP)
 
 LOG = logging.getLogger(__name__)
 MSG = utils.HBSDMsg
 
 
-def _is_valid_target(self, target, target_name, target_ports):
+def _is_valid_target(self, target, target_name, target_ports, is_pair):
     """Check if the specified target is valid."""
+    if is_pair:
+        return (target[:utils.PORT_ID_LENGTH] in target_ports and
+                target_name == self._PAIR_TARGET_NAME)
     return (target[:utils.PORT_ID_LENGTH] in target_ports and
-            target_name.startswith(self.driver_info['target_prefix']))
+            target_name.startswith(self.driver_info['target_prefix']) and
+            target_name != self._PAIR_TARGET_NAME)
 
 
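# Illustrative sketch (not part of this patch): with is_pair=True the helper
# accepts only the dedicated pair host group (self._PAIR_TARGET_NAME), while a
# normal attach now explicitly rejects that host group, so volume attachments
# and GAD pair mappings never share a target. The port and name values below
# are assumptions for illustration.
#
#     _is_valid_target(self, 'CL1-A-0', 'HBSD-pair00', ['CL1-A'], is_pair=True)
#     # -> True
#     _is_valid_target(self, 'CL1-A-0', 'HBSD-pair00', ['CL1-A'], is_pair=False)
#     # -> False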
 def _check_ldev_manageability(self, ldev_info, ldev, existing_ref):
     """Check if the LDEV meets the criteria for being managed."""
     if ldev_info['status'] != NORMAL_STS:
-        msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE)
+        msg = self.output_log(MSG.INVALID_LDEV_FOR_MANAGE)
         raise exception.ManageExistingInvalidReference(
             existing_ref=existing_ref, reason=msg)
     attributes = set(ldev_info['attributes'])
@@ -221,20 +235,20 @@ def _check_ldev_manageability(self, ldev_info, ldev, existing_ref):
             not attributes.issubset(
                 set(['CVS', self.driver_info['hdp_vol_attr'],
                      self.driver_info['hdt_vol_attr']]))):
-        msg = utils.output_log(MSG.INVALID_LDEV_ATTR_FOR_MANAGE, ldev=ldev,
-                               ldevtype=self.driver_info['nvol_ldev_type'])
+        msg = self.output_log(MSG.INVALID_LDEV_ATTR_FOR_MANAGE, ldev=ldev,
+                              ldevtype=self.driver_info['nvol_ldev_type'])
         raise exception.ManageExistingInvalidReference(
             existing_ref=existing_ref, reason=msg)
     if ldev_info['numOfPorts']:
-        msg = utils.output_log(MSG.INVALID_LDEV_PORT_FOR_MANAGE, ldev=ldev)
+        msg = self.output_log(MSG.INVALID_LDEV_PORT_FOR_MANAGE, ldev=ldev)
         raise exception.ManageExistingInvalidReference(
             existing_ref=existing_ref, reason=msg)
 
 
-def _check_ldev_size(ldev_info, ldev, existing_ref):
+def _check_ldev_size(self, ldev_info, ldev, existing_ref):
     """Hitachi storage calculates volume sizes in a block unit, 512 bytes."""
     if ldev_info['blockCapacity'] % utils.GIGABYTE_PER_BLOCK_SIZE:
-        msg = utils.output_log(MSG.INVALID_LDEV_SIZE_FOR_MANAGE, ldev=ldev)
+        msg = self.output_log(MSG.INVALID_LDEV_SIZE_FOR_MANAGE, ldev=ldev)
         raise exception.ManageExistingInvalidReference(
             existing_ref=existing_ref, reason=msg)
 
@@ -246,10 +260,24 @@ class HBSDREST(common.HBSDCommon):
         """Initialize instance variables."""
         super(HBSDREST, self).__init__(conf, storage_protocol, db)
         self.conf.append_config_values(REST_VOLUME_OPTS)
+        self.conf.append_config_values(REST_PAIR_OPTS)
         self.conf.append_config_values(san.san_opts)
 
         self.client = None
 
+    def do_setup(self, context):
+        if hasattr(
+                self.conf,
+                self.driver_info['param_prefix'] + '_pair_target_number'):
+            self._PAIR_TARGET_NAME_BODY = 'pair%02d' % (
+                self.conf.safe_get(self.driver_info['param_prefix'] +
                                   '_pair_target_number'))
+        else:
+            self._PAIR_TARGET_NAME_BODY = _PAIR_TARGET_NAME_BODY_DEFAULT
+        self._PAIR_TARGET_NAME = (self.driver_info['target_prefix'] +
+                                  self._PAIR_TARGET_NAME_BODY)
+        super(HBSDREST, self).do_setup(context)
+
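# Illustrative sketch (not part of this patch): do_setup() derives the name of
# the dedicated pair host group from the driver's parameter prefix. Assuming a
# parameter prefix of 'hitachi' and a target prefix of 'HBSD-', a deployment
# that sets hitachi_pair_target_number = 3 would get the body 'pair03' and the
# host group name 'HBSD-pair03'; without the option the default body 'pair00'
# is used.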
     def setup_client(self):
         """Initialize RestApiClient."""
         verify = self.conf.driver_ssl_cert_verify
@@ -258,6 +286,9 @@ class HBSDREST(common.HBSDCommon):
             if verify_path:
                 verify = verify_path
         self.verify = verify
+        is_rep = False
+        if self.storage_id is not None:
+            is_rep = True
         self.client = rest_api.RestApiClient(
             self.conf,
             self.conf.san_ip,
@@ -267,7 +298,8 @@ class HBSDREST(common.HBSDCommon):
             self.conf.san_password,
             self.driver_info['driver_prefix'],
             tcp_keepalive=self.conf.hitachi_rest_tcp_keepalive,
-            verify=verify)
+            verify=verify,
+            is_rep=is_rep)
         self.client.login()
 
     def need_client_setup(self):
@@ -307,7 +339,7 @@ class HBSDREST(common.HBSDCommon):
         """Delete the specified LDEV from the storage."""
         result = self.client.get_ldev(ldev)
         if result['emulationType'] == 'NOT DEFINED':
-            utils.output_log(MSG.LDEV_NOT_EXIST, ldev=ldev)
+            self.output_log(MSG.LDEV_NOT_EXIST, ldev=ldev)
             return
         self.client.delete_ldev(
             ldev,
@@ -352,7 +384,7 @@ class HBSDREST(common.HBSDCommon):
             _wait_for_copy_pair_status, timeutils.utcnow(),
             ldev, status, timeout)
         if not loop.start(interval=interval).wait():
-            msg = utils.output_log(
+            msg = self.output_log(
                 MSG.PAIR_STATUS_WAIT_TIMEOUT, svol=ldev)
             self.raise_error(msg)
 
@@ -375,7 +407,7 @@ class HBSDREST(common.HBSDCommon):
             if (utils.safe_get_err_code(ex.kwargs.get('errobj')) ==
                     rest_api.INVALID_SNAPSHOT_POOL and
                     not self.conf.hitachi_snap_pool):
-                msg = utils.output_log(
+                msg = self.output_log(
                     MSG.INVALID_PARAMETER,
                     param=self.driver_info['param_prefix'] + '_snap_pool')
                 self.raise_error(msg)
@@ -388,7 +420,7 @@ class HBSDREST(common.HBSDCommon):
             try:
                 self._delete_pair_from_storage(pvol, svol)
             except exception.VolumeDriverException:
-                utils.output_log(
+                self.output_log(
                     MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol)
 
     def _create_clone_pair(self, pvol, svol, snap_pool_id):
@@ -417,7 +449,7 @@ class HBSDREST(common.HBSDCommon):
             if (utils.safe_get_err_code(ex.kwargs.get('errobj')) ==
                     rest_api.INVALID_SNAPSHOT_POOL and
                     not self.conf.hitachi_snap_pool):
-                msg = utils.output_log(
+                msg = self.output_log(
                     MSG.INVALID_PARAMETER,
                     param=self.driver_info['param_prefix'] + '_snap_pool')
                 self.raise_error(msg)
@@ -430,7 +462,7 @@ class HBSDREST(common.HBSDCommon):
             try:
                 self._delete_pair_from_storage(pvol, svol)
             except exception.VolumeDriverException:
-                utils.output_log(
+                self.output_log(
                     MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol)
 
     def create_pair_on_storage(
@@ -468,7 +500,7 @@ class HBSDREST(common.HBSDCommon):
         loop = loopingcall.FixedIntervalLoopingCall(
             _wait_for_copy_pair_smpl, timeutils.utcnow(), ldev)
         if not loop.start(interval=interval).wait():
-            msg = utils.output_log(
+            msg = self.output_log(
                 MSG.PAIR_STATUS_WAIT_TIMEOUT, svol=ldev)
             self.raise_error(msg)
 
@@ -489,27 +521,65 @@ class HBSDREST(common.HBSDCommon):
             pvol, mun, ignore_return_code=ignore_return_code)
         self._wait_copy_pair_deleting(svol)
 
+    def _get_pair_ports(self):
+        return (self.storage_info['pair_ports'] or
+                self.storage_info['controller_ports'])
+
+    def terminate_pair_connection(self, ldev):
+        targets = {
+            'list': [],
+        }
+        ldev_info = self.get_ldev_info(['status', 'attributes'], ldev)
+        if (ldev_info['status'] == NORMAL_STS and
+                self.driver_info['mirror_attr'] in ldev_info['attributes']):
+            LOG.debug(
+                'The specified LDEV has replication pair. '
+                'Therefore, unmapping operation was skipped. '
+                '(LDEV: %(ldev)s, vol_attr: %(info)s)',
+                {'ldev': ldev, 'info': ldev_info['attributes']})
+            return
+        self._find_mapped_targets_from_storage(
+            targets, ldev, self._get_pair_ports(), is_pair=True)
+        self.unmap_ldev(targets, ldev)
+
     def delete_pair_based_on_svol(self, pvol, svol_info):
         """Disconnect all volume pairs to which the specified S-VOL belongs."""
         # If the pair status does not satisfy the execution condition,
         if not (svol_info['is_psus'] or
                 _STATUS_TABLE.get(svol_info['status']) == SMPP):
-            utils.output_log(
+            self.output_log(
                 MSG.UNABLE_TO_DELETE_PAIR, pvol=pvol, svol=svol_info['ldev'])
             self.raise_busy()
 
         self._delete_pair_from_storage(pvol, svol_info['ldev'])
+        if hasattr(
+                self.conf,
+                self.driver_info['param_prefix'] + '_rest_pair_target_ports'):
+            self.terminate_pair_connection(svol_info['ldev'])
+            self.terminate_pair_connection(pvol)
 
     def check_param(self):
         """Check parameter values and consistency among them."""
         super(HBSDREST, self).check_param()
         self.check_opts(self.conf, REST_VOLUME_OPTS)
         self.check_opts(self.conf, san.san_opts)
+        if hasattr(
+                self.conf,
+                self.driver_info['param_prefix'] + '_rest_pair_target_ports'):
+            self.check_opts(self.conf, REST_PAIR_OPTS)
+            if (not self.conf.hitachi_target_ports and
+                    not self.conf.hitachi_rest_pair_target_ports):
+                msg = self.output_log(
+                    MSG.INVALID_PARAMETER,
+                    param=self.driver_info['param_prefix'] +
+                    '_target_ports or ' + self.driver_info['param_prefix'] +
+                    '_rest_pair_target_ports')
+                self.raise_error(msg)
         LOG.debug(
             'Setting ldev_range: %s', self.storage_info['ldev_range'])
         for opt in _REQUIRED_REST_OPTS:
             if not self.conf.safe_get(opt):
-                msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
+                msg = self.output_log(MSG.INVALID_PARAMETER, param=opt)
                 self.raise_error(msg)
         if not self.conf.safe_get('san_api_port'):
             self.conf.san_api_port = _REST_DEFAULT_PORT
@@ -544,8 +614,8 @@ class HBSDREST(common.HBSDCommon):
             else:
                 lun = assigned_lun
         elif err_code == rest_api.ANOTHER_LDEV_MAPPED:
-            utils.output_log(MSG.MAP_LDEV_FAILED,
-                             ldev=ldev, port=port, id=gid, lun=lun)
+            self.output_log(MSG.MAP_LDEV_FAILED,
+                            ldev=ldev, port=port, id=gid, lun=lun)
             return None
         LOG.debug(
             'Created logical unit path to the specified logical device. '
@@ -554,12 +624,18 @@ class HBSDREST(common.HBSDCommon):
             {'ldev': ldev, 'port': port, 'gid': gid, 'lun': lun})
         return lun
 
-    def map_ldev(self, targets, ldev):
+    def map_ldev(self, targets, ldev, lun=None):
         """Create the path between the server and the LDEV and return LUN."""
-        port, gid = targets['list'][0]
-        lun = self._run_add_lun(ldev, port, gid)
-        targets['lun'][port] = True
-        for port, gid in targets['list'][1:]:
+        raise_err = False
+        if lun is not None:
+            head = 0
+            raise_err = True
+        else:
+            head = 1
+            port, gid = targets['list'][0]
+            lun = self._run_add_lun(ldev, port, gid)
+            targets['lun'][port] = True
+        for port, gid in targets['list'][head:]:
             # When multipath is configured, Nova compute expects that
             # target_lun define the same value in all storage target.
             # Therefore, it should use same value of lun in other target.
@@ -567,12 +643,19 @@ class HBSDREST(common.HBSDCommon):
                 lun2 = self._run_add_lun(ldev, port, gid, lun=lun)
                 if lun2 is not None:
                     targets['lun'][port] = True
+                    raise_err = False
             except exception.VolumeDriverException:
-                utils.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev,
-                                 port=port, id=gid, lun=lun)
+                self.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev,
+                                port=port, id=gid, lun=lun)
+        if raise_err:
+            msg = self.output_log(
+                MSG.CONNECT_VOLUME_FAILED,
+                ldev=ldev, reason='Failed to attach in all ports.')
+            self.raise_error(msg)
         return lun
 
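# Illustrative sketch (not part of this patch): map_ldev() now accepts an
# existing LUN. When one is passed (the pair path is already mapped on another
# port), every port in targets['list'] is tried with that LUN and raise_err
# starts out True; it is cleared as soon as any port accepts the mapping, so
# the driver raises CONNECT_VOLUME_FAILED only if no port could be attached.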
-    def attach_ldev(self, volume, ldev, connector, is_snapshot, targets):
+    def attach_ldev(
+            self, volume, ldev, connector, is_snapshot, targets, lun=None):
         """Initialize connection between the server and the volume."""
         target_ports = self.get_target_ports(connector)
         target_ports = self.filter_target_ports(target_ports, volume,
@@ -587,9 +670,10 @@ class HBSDREST(common.HBSDCommon):
             targets['list'].sort()
         for port in target_ports:
             targets['lun'][port] = False
-        return int(self.map_ldev(targets, ldev))
+        return int(self.map_ldev(targets, ldev, lun))
 
-    def _find_mapped_targets_from_storage(self, targets, ldev, target_ports):
+    def _find_mapped_targets_from_storage(
+            self, targets, ldev, target_ports, is_pair=False):
         """Update port-gid list for the specified LDEV."""
         ldev_info = self.get_ldev_info(['ports'], ldev)
         if not ldev_info['ports']:
@@ -597,7 +681,7 @@ class HBSDREST(common.HBSDCommon):
         for port_info in ldev_info['ports']:
             if _is_valid_target(self, port_info['portId'],
                                 port_info['hostGroupName'],
-                                target_ports):
+                                target_ports, is_pair):
                 targets['list'].append(port_info)
 
     def _get_unmap_targets_list(self, target_list, mapped_list):
@@ -649,7 +733,7 @@ class HBSDREST(common.HBSDCommon):
             self.client.delete_host_grp(port, gid)
             result = 0
         except exception.VolumeDriverException:
-            utils.output_log(MSG.DELETE_TARGET_FAILED, port=port, id=gid)
+            self.output_log(MSG.DELETE_TARGET_FAILED, port=port, id=gid)
         else:
             LOG.debug(
                 'Deleted target. (port: %(port)s, gid: %(gid)s)',
@@ -717,7 +801,7 @@ class HBSDREST(common.HBSDCommon):
                 rest_api.MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST])
 
         if 'errorSource' in result:
-            msg = utils.output_log(MSG.POOL_NOT_FOUND, pool=pool_id)
+            msg = self.output_log(MSG.POOL_NOT_FOUND, pool=pool_id)
             self.raise_error(msg)
 
         tp_cap = result['totalPoolCapacity'] // units.Ki
@@ -731,7 +815,7 @@ class HBSDREST(common.HBSDCommon):
         try:
             result = self.client.get_pools()
         except exception.VolumeDriverException:
-            utils.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool='all')
+            self.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool='all')
         pool_infos = []
         for pool_id in pool_ids:
             for pool_data in result:
@@ -739,7 +823,7 @@ class HBSDREST(common.HBSDCommon):
                     cap_data = self.get_pool_info(pool_id, pool_data)
                     break
             else:
-                utils.output_log(MSG.POOL_NOT_FOUND, pool=pool_id)
+                self.output_log(MSG.POOL_NOT_FOUND, pool=pool_id)
                 cap_data = None
             pool_infos.append(cap_data)
         return pool_infos
@@ -747,11 +831,11 @@ class HBSDREST(common.HBSDCommon):
     def discard_zero_page(self, volume):
         """Return the volume's no-data pages to the storage pool."""
         if self.conf.hitachi_discard_zero_page:
-            ldev = utils.get_ldev(volume)
+            ldev = self.get_ldev(volume)
             try:
                 self.client.discard_zero_page(ldev)
             except exception.VolumeDriverException:
-                utils.output_log(MSG.DISCARD_ZERO_PAGE_FAILED, ldev=ldev)
+                self.output_log(MSG.DISCARD_ZERO_PAGE_FAILED, ldev=ldev)
 
     def _get_copy_pair_info(self, ldev):
         """Return info of the copy pair."""
@@ -832,7 +916,7 @@ class HBSDREST(common.HBSDCommon):
         """Return the size[GB] of the specified LDEV."""
         ldev_info = self.get_ldev_info(
             _CHECK_LDEV_SIZE_KEYS, ldev)
-        _check_ldev_size(ldev_info, ldev, existing_ref)
+        _check_ldev_size(self, ldev_info, ldev, existing_ref)
         return ldev_info['blockCapacity'] / utils.GIGABYTE_PER_BLOCK_SIZE
 
     def _get_pool_id(self, pool_list, pool_name_or_id):
@@ -844,7 +928,7 @@ class HBSDREST(common.HBSDCommon):
         for pool_data in pool_list['pool_list']:
             if pool_data['poolName'] == pool_name_or_id:
                 return pool_data['poolId']
-        msg = utils.output_log(MSG.POOL_NOT_FOUND, pool=pool_name_or_id)
+        msg = self.output_log(MSG.POOL_NOT_FOUND, pool=pool_name_or_id)
         self.raise_error(msg)
 
     def check_pool_id(self):
@@ -942,11 +1026,11 @@ class HBSDREST(common.HBSDCommon):
             obj_update['status'] = 'available' if isinstance(
                 exc, (exception.VolumeIsBusy,
                       exception.SnapshotIsBusy)) else 'error'
-            utils.output_log(
+            self.output_log(
                 MSG.GROUP_OBJECT_DELETE_FAILED,
                 obj='snapshot' if is_snapshot else 'volume',
                 group='group snapshot' if is_snapshot else 'group',
-                group_id=group.id, obj_id=obj.id, ldev=utils.get_ldev(obj),
+                group_id=group.id, obj_id=obj.id, ldev=self.get_ldev(obj),
                 reason=exc.msg)
             raise loopingcall.LoopingCallDone(obj_update)
 
@@ -977,9 +1061,9 @@ class HBSDREST(common.HBSDCommon):
         def _create_group_volume_from_src(context, volume, src, from_snapshot):
             volume_model_update = {'id': volume.id}
             try:
-                ldev = utils.get_ldev(src)
+                ldev = self.get_ldev(src)
                 if ldev is None:
-                    msg = utils.output_log(
+                    msg = self.output_log(
                         MSG.INVALID_LDEV_FOR_VOLUME_COPY,
                         type='snapshot' if from_snapshot else 'volume',
                         id=src.id)
@@ -1009,7 +1093,7 @@ class HBSDREST(common.HBSDCommon):
                 msg = volume_model_update['msg']
             else:
                 volumes_model_update.append(volume_model_update)
-            ldev = utils.get_ldev(volume_model_update)
+            ldev = self.get_ldev(volume_model_update)
             if ldev is not None:
                 new_ldevs.append(ldev)
         if not is_success:
@@ -1020,18 +1104,18 @@ class HBSDREST(common.HBSDCommon):
                 try:
                     self.delete_ldev(new_ldev)
                 except exception.VolumeDriverException:
-                    utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=new_ldev)
+                    self.output_log(MSG.DELETE_LDEV_FAILED, ldev=new_ldev)
         return None, volumes_model_update
 
     def update_group(self, group, add_volumes=None):
         if add_volumes and volume_utils.is_group_a_cg_snapshot_type(group):
             for volume in add_volumes:
-                ldev = utils.get_ldev(volume)
+                ldev = self.get_ldev(volume)
                 if ldev is None:
-                    msg = utils.output_log(MSG.LDEV_NOT_EXIST_FOR_ADD_GROUP,
-                                           volume_id=volume.id,
-                                           group='consistency group',
-                                           group_id=group.id)
+                    msg = self.output_log(MSG.LDEV_NOT_EXIST_FOR_ADD_GROUP,
+                                          volume_id=volume.id,
+                                          group='consistency group',
+                                          group_id=group.id)
                    self.raise_error(msg)
        return None, None, None
 
@@ -1048,7 +1132,7 @@ class HBSDREST(common.HBSDCommon):
                     fields.SnapshotStatus.AVAILABLE)
             except Exception:
                 snapshot_model_update['status'] = fields.SnapshotStatus.ERROR
-                utils.output_log(
+                self.output_log(
                     MSG.GROUP_SNAPSHOT_CREATE_FAILED,
                     group=group_snapshot.group_id,
                     group_snapshot=group_snapshot.id,
@@ -1084,8 +1168,8 @@ class HBSDREST(common.HBSDCommon):
             try:
                 self._delete_pair_from_storage(pair['pvol'], pair['svol'])
             except exception.VolumeDriverException:
-                utils.output_log(MSG.DELETE_PAIR_FAILED, pvol=pair['pvol'],
-                                 svol=pair['svol'])
+                self.output_log(MSG.DELETE_PAIR_FAILED, pvol=pair['pvol'],
+                                svol=pair['svol'])
 
     def _create_ctg_snap_pair(self, pairs):
         snapshotgroup_name = self._create_ctg_snapshot_group_name(
@@ -1107,12 +1191,12 @@ class HBSDREST(common.HBSDCommon):
                         _MAX_CTG_COUNT_EXCEEDED_ADD_SNAPSHOT) or
                         (utils.safe_get_err_code(ex.kwargs.get('errobj')) ==
                             _MAX_PAIR_COUNT_IN_CTG_EXCEEDED_ADD_SNAPSHOT)):
-                    msg = utils.output_log(MSG.FAILED_CREATE_CTG_SNAPSHOT)
+                    msg = self.output_log(MSG.FAILED_CREATE_CTG_SNAPSHOT)
                     self.raise_error(msg)
                 elif (utils.safe_get_err_code(ex.kwargs.get('errobj')) ==
                         rest_api.INVALID_SNAPSHOT_POOL and
                         not self.conf.hitachi_snap_pool):
-                    msg = utils.output_log(
+                    msg = self.output_log(
                         MSG.INVALID_PARAMETER,
                         param=self.driver_info['param_prefix'] +
                         '_snap_pool')
@@ -1134,9 +1218,9 @@ class HBSDREST(common.HBSDCommon):
         def _create_cgsnapshot_volume(snapshot):
             pair = {'snapshot': snapshot}
             try:
-                pair['pvol'] = utils.get_ldev(snapshot.volume)
+                pair['pvol'] = self.get_ldev(snapshot.volume)
                 if pair['pvol'] is None:
-                    msg = utils.output_log(
+                    msg = self.output_log(
                         MSG.INVALID_LDEV_FOR_VOLUME_COPY,
                         type='volume', id=snapshot.volume_id)
                     self.raise_error(msg)
@@ -1150,9 +1234,9 @@ class HBSDREST(common.HBSDCommon):
 
         try:
             for snapshot in snapshots:
-                ldev = utils.get_ldev(snapshot.volume)
+                ldev = self.get_ldev(snapshot.volume)
                 if ldev is None:
-                    msg = utils.output_log(
+                    msg = self.output_log(
                         MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume',
                         id=snapshot.volume_id)
                     self.raise_error(msg)
@@ -1177,7 +1261,7 @@ class HBSDREST(common.HBSDCommon):
                 try:
                     self.delete_ldev(pair['svol'])
                 except exception.VolumeDriverException:
-                    utils.output_log(
+                    self.output_log(
                         MSG.DELETE_LDEV_FAILED, ldev=pair['svol'])
             model_update = {'status': fields.GroupSnapshotStatus.ERROR}
             for snapshot in snapshots:
@@ -1199,15 +1283,87 @@ class HBSDREST(common.HBSDCommon):
         else:
             return self._create_non_cgsnapshot(group_snapshot, snapshots)
 
+    def _init_pair_targets(self, targets_info):
+        self._pair_targets = []
+        for port in targets_info.keys():
+            if not targets_info[port]:
+                continue
+            params = {'portId': port}
+            host_grp_list = self.client.get_host_grps(params)
+            gid = None
+            for host_grp_data in host_grp_list:
+                if host_grp_data['hostGroupName'] == self._PAIR_TARGET_NAME:
+                    gid = host_grp_data['hostGroupNumber']
+                    break
+            if not gid:
+                try:
+                    connector = {
+                        'ip': self._PAIR_TARGET_NAME_BODY,
+                        'wwpns': [self._PAIR_TARGET_NAME_BODY],
+                    }
+                    target_name, gid = self.create_target_to_storage(
+                        port, connector, None)
+                    LOG.debug(
+                        'Created host group for pair operation. '
+                        '(port: %(port)s, gid: %(gid)s)',
+                        {'port': port, 'gid': gid})
+                except exception.VolumeDriverException:
+                    self.output_log(MSG.CREATE_HOST_GROUP_FAILED, port=port)
+                    continue
+            self._pair_targets.append((port, gid))
+
+        if not self._pair_targets:
+            msg = self.output_log(MSG.PAIR_TARGET_FAILED)
+            self.raise_error(msg)
+        self._pair_targets.sort(reverse=True)
+        LOG.debug('Setting pair_targets: %s', self._pair_targets)
+
+    def init_cinder_hosts(self, **kwargs):
+        targets = {
+            'info': {},
+            'list': [],
+            'iqns': {},
+            'target_map': {},
+        }
+        super(HBSDREST, self).init_cinder_hosts(targets=targets)
+        if self.storage_info['pair_ports']:
+            targets['info'] = {}
+            ports = self._get_pair_ports()
+            for port in ports:
+                targets['info'][port] = True
+        if hasattr(
+                self.conf,
+                self.driver_info['param_prefix'] + '_rest_pair_target_ports'):
+            self._init_pair_targets(targets['info'])
+
+    def initialize_pair_connection(self, ldev):
+        port, gid = None, None
+
+        for port, gid in self._pair_targets:
+            try:
+                targets = {
+                    'info': {},
+                    'list': [(port, gid)],
+                    'lun': {},
+                }
+                return self.map_ldev(targets, ldev)
+            except exception.VolumeDriverException:
+                self.output_log(
+                    MSG.MAP_LDEV_FAILED, ldev=ldev, port=port, id=gid,
+                    lun=None)
+
+        msg = self.output_log(MSG.MAP_PAIR_TARGET_FAILED, ldev=ldev)
+        self.raise_error(msg)
+
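# Illustrative sketch (not part of this patch): initialize_pair_connection()
# walks self._pair_targets (sorted in reverse so the highest-priority
# port/gid pair comes first) and returns the LUN from the first mapping that
# succeeds; only when every pair target fails does it raise
# MAP_PAIR_TARGET_FAILED.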
     def migrate_volume(self, volume, host, new_type=None):
         """Migrate the specified volume."""
         attachments = volume.volume_attachment
         if attachments:
             return False, None
 
-        pvol = utils.get_ldev(volume)
+        pvol = self.get_ldev(volume)
         if pvol is None:
-            msg = utils.output_log(
+            msg = self.output_log(
                 MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume', id=volume.id)
             self.raise_error(msg)
 
@@ -1226,7 +1382,7 @@ class HBSDREST(common.HBSDCommon):
                         (pvol, svol, copy_method, status)
                         for svol, copy_method, status in
                         zip(svols, copy_methods, svol_statuses)]
-                    msg = utils.output_log(
+                    msg = self.output_log(
                         MSG.MIGRATE_VOLUME_FAILED,
                         volume=volume.id, ldev=pvol,
                         pair_info=', '.join(pair_info))
@@ -1239,7 +1395,7 @@ class HBSDREST(common.HBSDCommon):
                     pair_info = '(%s, %s, %s, %s)' % (
                         pair_info['pvol'], svol_info['ldev'],
                         utils.THIN, svol_info['status'])
-                    msg = utils.output_log(
+                    msg = self.output_log(
                         MSG.MIGRATE_VOLUME_FAILED,
                         volume=volume.id, ldev=svol_info['ldev'],
                         pair_info=pair_info)
@@ -1272,7 +1428,7 @@ class HBSDREST(common.HBSDCommon):
             try:
                 self.delete_ldev(pvol)
             except exception.VolumeDriverException:
-                utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=pvol)
+                self.output_log(MSG.DELETE_LDEV_FAILED, ldev=pvol)
 
         return True, {
             'provider_location': str(svol),
@@ -1290,9 +1446,9 @@ class HBSDREST(common.HBSDCommon):
                 return False
             return True
 
-        ldev = utils.get_ldev(volume)
+        ldev = self.get_ldev(volume)
         if ldev is None:
-            msg = utils.output_log(
+            msg = self.output_log(
                 MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume',
                 id=volume['id'])
             self.raise_error(msg)
@@ -1313,11 +1469,13 @@ class HBSDREST(common.HBSDCommon):
         self._wait_copy_pair_status(svol, set([SMPL, PSUE]))
         status = self._get_copy_pair_status(svol)
         if status == PSUE:
-            msg = utils.output_log(
-                MSG.VOLUME_COPY_FAILED, pvol=pvol, svol=svol)
+            msg = self.output_log(MSG.VOLUME_COPY_FAILED, pvol=pvol, svol=svol)
             self.raise_error(msg)
 
     def create_target_name(self, connector):
+        if ('ip' in connector and connector['ip']
+                == self._PAIR_TARGET_NAME_BODY):
+            return self._PAIR_TARGET_NAME
         wwn = (min(self.get_hba_ids_from_connector(connector)) if
                self.format_info['group_name_var_cnt'][
                    common.GROUP_NAME_VAR_WWN] else '')
@@ -1,4 +1,4 @@
-# Copyright (C) 2020, 2021, Hitachi, Ltd.
+# Copyright (C) 2020, 2022, Hitachi, Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -26,8 +26,6 @@ from oslo_service import loopingcall
 from oslo_utils import timeutils
 import requests
 from requests.adapters import HTTPAdapter
-from requests.packages.urllib3.connection import HTTPConnection
-from requests.packages.urllib3.poolmanager import PoolManager
 
 from cinder import exception
 from cinder.i18n import _
@@ -46,13 +44,18 @@ _REST_SERVER_RESTART_TIMEOUT = 10 * 60
 _REST_SERVER_ERROR_TIMEOUT = 10 * 60
 _KEEP_SESSION_LOOP_INTERVAL = 3 * 60
 _ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT = 10 * 60
+_LOCK_RESOURCE_GROUP_TIMEOUT = 3 * 60
 
 _TCP_KEEPIDLE = 60
 _TCP_KEEPINTVL = 15
 _TCP_KEEPCNT = 4
 
+_MIRROR_RESERVED_VIRTUAL_LDEV_ID = 65535
+
 _HTTPS = 'https://'
 
+_NOT_SPECIFIED = 'NotSpecified'
+
 _REST_LOCKED_ERRORS = [
     ('2E11', '2205'),
     ('2E11', '2207'),
@@ -90,6 +93,13 @@ LOG = logging.getLogger(__name__)
 MSG = utils.HBSDMsg
 
 
+def _get_device_group_name(remote_client, copy_group_name, is_secondary,
+                            is_remote=False):
+    if remote_client is None and is_remote:
+        return _NOT_SPECIFIED
+    return copy_group_name + ('S' if is_secondary ^ is_remote else 'P')
+
+
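# Illustrative sketch (not part of this patch): for a copy group named
# 'HBSD-GAD' (hypothetical), _get_device_group_name() yields 'HBSD-GADP' for
# the local device group and 'HBSD-GADS' for the remote one when called from
# the primary side (is_secondary=False); the suffixes swap on the secondary
# side, and _NOT_SPECIFIED is returned when a remote lookup has no remote
# client.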
 def _build_base_url(ip_addr, ip_port):
     return '%(https)s%(ip)s:%(port)s/ConfigurationManager' % {
         'https': _HTTPS,
@@ -101,7 +111,8 @@ def _build_base_url(ip_addr, ip_port):
 class KeepAliveAdapter(HTTPAdapter):
 
     def __init__(self, conf):
-        self.options = HTTPConnection.default_socket_options + [
+        self.socket_options = [
+            (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),
             (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
             (socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
              conf.hitachi_rest_tcp_keepidle),
@@ -113,11 +124,9 @@ class KeepAliveAdapter(HTTPAdapter):
 
         super(KeepAliveAdapter, self).__init__()
 
-    def init_poolmanager(self, connections, maxsize, block=False):
-        self.poolmanager = PoolManager(num_pools=connections,
-                                       maxsize=maxsize,
-                                       block=block,
-                                       socket_options=self.options)
+    def init_poolmanager(self, *args, **kwargs):
+        kwargs['socket_options'] = self.socket_options
+        super(KeepAliveAdapter, self).init_poolmanager(*args, **kwargs)
 
 
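# Illustrative sketch (not part of this patch): requests' HTTPAdapter forwards
# extra keyword arguments from init_poolmanager() to urllib3's PoolManager, so
# setting kwargs['socket_options'] here applies the TCP keepalive options to
# every connection without importing urllib3's HTTPConnection or PoolManager
# directly.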
 class ResponseData(dict):
@@ -226,7 +235,7 @@ class RestApiClient():
 
     def __init__(self, conf, ip_addr, ip_port, storage_device_id,
                  user_id, user_pass, driver_prefix, tcp_keepalive=False,
-                 verify=False):
+                 verify=False, is_rep=False):
         """Initialize instance variables."""
         self.conf = conf
         self.ip_addr = ip_addr
@@ -238,9 +247,12 @@ class RestApiClient():
         self.tcp_keepalive = tcp_keepalive
         self.verify = verify
         self.connect_timeout = self.conf.hitachi_rest_connect_timeout
+        self.is_rep = is_rep
         self.login_lock = threading.Lock()
         self.keep_session_loop = loopingcall.FixedIntervalLoopingCall(
             self._keep_session)
+        self.nested_count = 0
+        self.resource_lock = threading.Lock()
 
         self.base_url = _build_base_url(ip_addr, self.ip_port)
         self.object_url = '%(base_url)s/v1/objects/storages/%(storage_id)s' % {
@@ -295,6 +307,10 @@ class RestApiClient():
         else:
             read_timeout = self.conf.hitachi_rest_get_api_response_timeout
 
+        remote_auth = kwargs.get('remote_auth')
+        if remote_auth:
+            headers["Remote-Authorization"] = 'Session ' + remote_auth.token
+
         auth_data = kwargs.get('auth', self.get_my_session())
 
         timeout = (self.connect_timeout, read_timeout)
@@ -320,7 +336,7 @@ class RestApiClient():
                 verify=self.verify)
 
         except Exception as e:
-            msg = utils.output_log(
+            msg = self.output_log(
                 MSG.REST_SERVER_CONNECT_FAILED,
                 exception=type(e), message=e,
                 method=method, url=url, params=params, body=body)
@@ -361,11 +377,11 @@ class RestApiClient():
             if (kwargs['no_retry'] or
                     utils.timed_out(
                         start_time, self.conf.hitachi_lock_timeout)):
-                msg = utils.output_log(MSG.REST_API_FAILED,
-                                       no_log=kwargs['no_log'],
-                                       method=method, url=url,
-                                       params=params, body=body,
-                                       **response.get_errobj())
+                msg = self.output_log(MSG.REST_API_FAILED,
+                                      no_log=kwargs['no_log'],
+                                      method=method, url=url,
+                                      params=params, body=body,
+                                      **response.get_errobj())
                 if kwargs['do_raise']:
                     message = _(
                         '%(prefix)s error occurred. %(msg)s' % {
@@ -409,27 +425,27 @@ class RestApiClient():
                     retry = False
             elif retry and utils.timed_out(start_time, kwargs['timeout']):
                 if kwargs['timeout_message']:
-                    utils.output_log(kwargs['timeout_message'][0],
-                                     **kwargs['timeout_message'][1])
+                    self.output_log(kwargs['timeout_message'][0],
+                                    **kwargs['timeout_message'][1])
                 if response.is_json():
-                    msg = utils.output_log(MSG.REST_API_TIMEOUT,
-                                           no_log=kwargs['no_log'],
-                                           method=method, url=url,
-                                           params=params, body=body,
-                                           **response.get_job_result())
+                    msg = self.output_log(MSG.REST_API_TIMEOUT,
+                                          no_log=kwargs['no_log'],
+                                          method=method, url=url,
+                                          params=params, body=body,
+                                          **response.get_job_result())
                 if errobj:
-                    msg = utils.output_log(MSG.REST_API_FAILED,
-                                           no_log=kwargs['no_log'],
-                                           method=method, url=url,
-                                           params=params, body=body,
-                                           **response.get_errobj())
+                    msg = self.output_log(MSG.REST_API_FAILED,
+                                          no_log=kwargs['no_log'],
+                                          method=method, url=url,
+                                          params=params, body=body,
+                                          **response.get_errobj())
                 else:
-                    msg = utils.output_log(MSG.REST_API_HTTP_ERROR,
-                                           no_log=kwargs['no_log'],
-                                           status_code=response['status_code'],
-                                           response_body=rsp_body,
-                                           method=method, url=url,
-                                           params=params, body=body)
+                    msg = self.output_log(MSG.REST_API_HTTP_ERROR,
+                                          no_log=kwargs['no_log'],
+                                          status_code=response['status_code'],
+                                          response_body=rsp_body,
+                                          method=method, url=url,
+                                          params=params, body=body)
                 if kwargs['do_raise']:
                     message = _(
                         '%(prefix)s error occurred. %(msg)s' % {
@@ -448,18 +464,18 @@ class RestApiClient():
 
         if not retry:
             if response.is_json():
-                msg = utils.output_log(MSG.REST_API_FAILED,
-                                       no_log=kwargs['no_log'],
-                                       method=method, url=url,
-                                       params=params, body=body,
-                                       **response.get_errobj())
+                msg = self.output_log(MSG.REST_API_FAILED,
+                                      no_log=kwargs['no_log'],
+                                      method=method, url=url,
+                                      params=params, body=body,
+                                      **response.get_errobj())
             else:
-                msg = utils.output_log(MSG.REST_API_HTTP_ERROR,
-                                       no_log=kwargs['no_log'],
-                                       status_code=response['status_code'],
-                                       response_body=rsp_body,
-                                       method=method, url=url,
-                                       params=params, body=body)
+                msg = self.output_log(MSG.REST_API_HTTP_ERROR,
+                                      no_log=kwargs['no_log'],
+                                      status_code=response['status_code'],
+                                      response_body=rsp_body,
+                                      method=method, url=url,
+                                      params=params, body=body)
             if kwargs['do_raise']:
                 message = _(
                     '%(prefix)s error occurred. %(msg)s' % {
@@ -471,6 +487,39 @@ class RestApiClient():
                     message, errobj=errobj)
         return retry, rsp_body, errobj
 
+    def lock_resource_group(self, waittime=_LOCK_RESOURCE_GROUP_TIMEOUT):
+        """Lock resources.
+
+        Lock resources of a resource group allocated to the user who
+        executes API requests, preventing other users from performing
+        operations on the resources.
+        """
+        with self.resource_lock:
+            if self.nested_count <= 0:
+                url = '%(url)s/resource-group-service/actions/%(action)s' % {
+                    'url': self.service_url,
+                    'action': 'lock',
+                } + '/invoke'
+                if waittime:
+                    body = {"parameters": {"waitTime": waittime}}
+                    self._invoke(url, body=body, timeout=waittime)
+                else:
+                    self._invoke(url)
+            self.nested_count += 1
+
+    def unlock_resource_group(self):
+        """If the lock is already released, there is no need to unlock."""
+        with self.resource_lock:
+            if self.nested_count == 0:
+                return
+            self.nested_count -= 1
+            if self.nested_count <= 0:
+                url = '%(url)s/resource-group-service/actions/%(action)s' % {
+                    'url': self.service_url,
+                    'action': 'unlock',
+                } + '/invoke'
+                self._invoke(url)
+
     def set_my_session(self, session):
         self.session = session
 
@ -527,7 +576,7 @@ class RestApiClient():
|
|||||||
LOG.debug("Trying to re-login.")
|
LOG.debug("Trying to re-login.")
|
||||||
retry = self._login(do_raise=False)
|
retry = self._login(do_raise=False)
|
||||||
if not retry:
|
if not retry:
|
||||||
utils.output_log(
|
self.output_log(
|
||||||
MSG.REST_LOGIN_FAILED,
|
MSG.REST_LOGIN_FAILED,
|
||||||
no_log=no_log, user=self.user_id)
|
no_log=no_log, user=self.user_id)
|
||||||
return retry
|
return retry
|
||||||
@ -838,3 +887,171 @@ class RestApiClient():
|
|||||||
'action': 'discard-zero-page',
|
'action': 'discard-zero-page',
|
||||||
}
|
}
|
||||||
self._invoke(url)
|
self._invoke(url)
|
||||||
|
|
||||||
|
def get_remote_copy_grps(self, remote_client):
|
||||||
|
url = '%(url)s/remote-mirror-copygroups' % {
|
||||||
|
'url': self.object_url,
|
||||||
|
}
|
||||||
|
params = {"remoteStorageDeviceId": remote_client.storage_id}
|
||||||
|
with RemoteSession(remote_client) as session:
|
||||||
|
return self._get_objects(url, params=params, remote_auth=session)
|
||||||
|
|
||||||
|
def get_remote_copy_grp(self, remote_client, copy_group_name, **kwargs):
|
||||||
|
url = '%(url)s/remote-mirror-copygroups/%(id)s' % {
|
||||||
|
'url': self.object_url,
|
||||||
|
'id': self._remote_copygroup_id(remote_client, copy_group_name),
|
||||||
|
}
|
||||||
|
with RemoteSession(remote_client) as session:
|
||||||
|
return self._get_object(url, remote_auth=session, **kwargs)
|
||||||
|
|
||||||
|
def get_remote_copypair(self, remote_client, copy_group_name,
|
||||||
|
pvol_ldev_id, svol_ldev_id, is_secondary=False,
|
||||||
|
**kwargs):
|
||||||
|
url = '%(url)s/remote-mirror-copypairs/%(id)s' % {
|
||||||
|
'url': self.object_url,
|
||||||
|
'id': self._remote_copypair_id(
|
||||||
|
remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id,
|
||||||
|
is_secondary),
|
||||||
|
}
|
||||||
|
if remote_client:
|
||||||
|
with RemoteSession(remote_client) as session:
|
||||||
|
return self._get_object(url, remote_auth=session, **kwargs)
|
||||||
|
return self._get_object(url, **kwargs)
|
||||||
|
|
||||||
|
def add_remote_copypair(self, remote_client, body):
|
||||||
|
url = '%(url)s/remote-mirror-copypairs' % {
|
||||||
|
'url': self.object_url,
|
||||||
|
}
|
||||||
|
if self.storage_id > remote_client.storage_id:
|
||||||
|
client1, client2 = self, remote_client
|
||||||
|
else:
|
||||||
|
client1, client2 = remote_client, self
|
||||||
|
with ResourceGroupLock(client1):
|
||||||
|
with ResourceGroupLock(client2):
|
||||||
|
session = remote_client.get_my_session()
|
||||||
|
return self._add_object(url, body=body,
|
||||||
|
no_relogin=True,
|
||||||
|
remote_auth=session,
|
||||||
|
job_nowait=True)[0]
|
||||||
|
|
||||||
|
@utils.synchronized_on_copy_group()
|
||||||
|
def split_remote_copypair(self, remote_client, copy_group_name,
|
||||||
|
pvol_ldev_id, svol_ldev_id, rep_type):
|
||||||
|
body = {"parameters": {"replicationType": rep_type}}
|
||||||
|
url = '%(url)s/remote-mirror-copypairs/%(id)s/actions/%(action)s' % {
|
||||||
|
'url': self.object_url,
|
||||||
|
'id': self._remote_copypair_id(remote_client, copy_group_name,
|
||||||
|
pvol_ldev_id, svol_ldev_id),
|
||||||
|
'action': 'split',
|
||||||
|
} + '/invoke'
|
||||||
|
with RemoteSession(remote_client) as session:
|
||||||
|
self._invoke(url, body=body, remote_auth=session, job_nowait=True)
|
||||||
|
|
||||||
|
@utils.synchronized_on_copy_group()
|
||||||
|
def resync_remote_copypair(
|
||||||
|
self, remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id,
|
||||||
|
rep_type, copy_speed=None):
|
||||||
|
body = {"parameters": {"replicationType": rep_type}}
|
||||||
|
if copy_speed:
|
||||||
|
body["parameters"]["copyPace"] = copy_speed
|
||||||
|
url = '%(url)s/remote-mirror-copypairs/%(id)s/actions/%(action)s' % {
|
||||||
|
'url': self.object_url,
|
||||||
|
'id': self._remote_copypair_id(remote_client, copy_group_name,
|
||||||
|
pvol_ldev_id, svol_ldev_id),
|
||||||
|
'action': 'resync',
|
||||||
|
} + '/invoke'
|
||||||
|
with RemoteSession(remote_client) as session:
|
||||||
|
self._invoke(url, body=body, remote_auth=session, job_nowait=True)
|
||||||
|
|
||||||
|
@utils.synchronized_on_copy_group()
|
||||||
|
def delete_remote_copypair(self, remote_client, copy_group_name,
|
||||||
|
pvol_ldev_id, svol_ldev_id):
|
||||||
|
url = '%(url)s/remote-mirror-copypairs/%(id)s' % {
|
||||||
|
'url': self.object_url,
|
||||||
|
'id': self._remote_copypair_id(
|
||||||
|
remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id),
|
||||||
|
}
|
||||||
|
if self.storage_id > remote_client.storage_id:
|
||||||
|
client1, client2 = self, remote_client
|
||||||
|
else:
|
||||||
|
client1, client2 = remote_client, self
|
||||||
|
with ResourceGroupLock(client1):
|
||||||
|
with ResourceGroupLock(client2):
|
||||||
|
session = remote_client.get_my_session()
|
||||||
|
self._delete_object(
|
||||||
|
url, no_relogin=True, remote_auth=session)
|
||||||
|
|
||||||
|
def _remote_copygroup_id(self, remote_client, copy_group_name,
|
||||||
|
is_secondary=False):
|
||||||
|
storage_id = (remote_client.storage_id if remote_client
|
||||||
|
else _NOT_SPECIFIED)
|
||||||
|
return "%s,%s,%s,%s" % (
|
||||||
|
storage_id,
|
||||||
|
copy_group_name,
|
||||||
|
_get_device_group_name(remote_client, copy_group_name,
|
||||||
|
is_secondary),
|
||||||
|
_get_device_group_name(remote_client, copy_group_name,
|
||||||
|
is_secondary, is_remote=True))
|
||||||
|
|
||||||
|
def _remote_copypair_id(self, remote_client, copy_group_name,
|
||||||
|
pvol_ldev_id, svol_ldev_id, is_secondary=False):
|
||||||
|
return "%s,HBSD-LDEV-%d-%d" % (
|
||||||
|
self._remote_copygroup_id(remote_client, copy_group_name,
|
||||||
|
is_secondary),
|
||||||
|
pvol_ldev_id,
|
||||||
|
svol_ldev_id)
|
||||||
|
|
||||||
|
def assign_virtual_ldevid(
|
||||||
|
self, ldev_id,
|
||||||
|
virtual_ldev_id=_MIRROR_RESERVED_VIRTUAL_LDEV_ID):
|
||||||
|
url = '%(url)s/ldevs/%(id)s/actions/%(action)s/invoke' % {
|
||||||
|
'url': self.object_url,
|
||||||
|
'id': ldev_id,
|
||||||
|
'action': 'assign-virtual-ldevid',
|
||||||
|
}
|
||||||
|
body = {"parameters": {"virtualLdevId": virtual_ldev_id}}
|
||||||
|
ignore_error = [('2E21', '9305'), ('2E30', '0088')]
|
||||||
|
self._invoke(url, body=body, ignore_error=ignore_error)
|
||||||
|
|
||||||
|
def unassign_virtual_ldevid(
|
||||||
|
self, ldev_id,
|
||||||
|
virtual_ldev_id=_MIRROR_RESERVED_VIRTUAL_LDEV_ID):
|
||||||
|
url = '%(url)s/ldevs/%(id)s/actions/%(action)s/invoke' % {
|
||||||
|
'url': self.object_url,
|
||||||
|
'id': ldev_id,
|
||||||
|
'action': 'unassign-virtual-ldevid',
|
||||||
|
}
|
||||||
|
body = {"parameters": {"virtualLdevId": virtual_ldev_id}}
|
||||||
|
self._invoke(url, body=body)
|
||||||
|
|
||||||
|
def output_log(self, msg_enum, **kwargs):
|
||||||
|
if self.is_rep:
|
||||||
|
return utils.output_log(
|
||||||
|
msg_enum, storage_id=self.storage_id, **kwargs)
|
||||||
|
else:
|
||||||
|
return utils.output_log(msg_enum, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class RemoteSession(object):
|
||||||
|
|
||||||
|
def __init__(self, remote_client):
|
||||||
|
self.remote_client = remote_client
|
||||||
|
|
||||||
|
def __enter__(self):
|
||||||
|
return self.remote_client.get_my_session()
|
||||||
|
|
||||||
|
def __exit__(self, exc_type, exc_value, traceback):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class ResourceGroupLock(object):
|
||||||
|
|
||||||
|
def __init__(self, client):
|
||||||
|
self.client = client
|
||||||
|
|
||||||
|
def __enter__(self):
|
||||||
|
self.client.lock_resource_group()
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __exit__(self, exc_type, exc_value, traceback):
|
||||||
|
self.client.unlock_resource_group()
|
||||||
|
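The resource group lock added above is reference counted, so nested callers trigger only one lock/unlock REST call per storage system. A minimal usage sketch, not part of the change; `client` stands for an assumed, already logged-in RestApiClient:

    client.lock_resource_group()        # nested_count 0 -> 1, REST "lock" action invoked
    client.lock_resource_group()        # nested_count 1 -> 2, no REST call
    try:
        pass                            # work that needs the resource group locked
    finally:
        client.unlock_resource_group()  # nested_count 2 -> 1, no REST call
        client.unlock_resource_group()  # nested_count 1 -> 0, REST "unlock" action invoked

    # Equivalent, using the context manager defined above:
    with ResourceGroupLock(client):
        pass

Note that add_remote_copypair() and delete_remote_copypair() always acquire the lock of the storage system with the larger storage_id first, so two backends locking each other's resource groups cannot deadlock.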
@@ -57,6 +57,12 @@ class HBSDRESTFC(rest.HBSDREST):
         """Prepare for using the storage."""
         target_ports = self.conf.hitachi_target_ports
         compute_target_ports = self.conf.hitachi_compute_target_ports
+        if hasattr(
+                self.conf,
+                self.driver_info['param_prefix'] + '_rest_pair_target_ports'):
+            pair_target_ports = self.conf.hitachi_rest_pair_target_ports
+        else:
+            pair_target_ports = []
         available_ports = []
         available_compute_ports = []

@@ -64,13 +70,15 @@ class HBSDRESTFC(rest.HBSDREST):
         # The port attributes must contain TAR.
         params = {'portAttributes': 'TAR'}
         port_list = self.client.get_ports(params=params)
-        for port in set(target_ports + compute_target_ports):
+        for port in set(target_ports + compute_target_ports +
+                        pair_target_ports):
             if port not in [port_data['portId'] for port_data in port_list]:
-                utils.output_log(MSG.INVALID_PORT, port=port,
+                self.output_log(MSG.INVALID_PORT, port=port,
                                 additional_info='portAttributes: not TAR')
         for port_data in port_list:
             port = port_data['portId']
-            if port not in set(target_ports + compute_target_ports):
+            if port not in set(target_ports + compute_target_ports +
+                               pair_target_ports):
                 continue
             secure_fc_port = True
             can_port_schedule = True
@@ -89,7 +97,7 @@ class HBSDRESTFC(rest.HBSDREST):
                     port_data.get('portConnection') == 'PtoP')):
                 can_port_schedule = False
             if not secure_fc_port or not can_port_schedule:
-                utils.output_log(
+                self.output_log(
                     MSG.INVALID_PORT, port=port,
                     additional_info='portType: %s, lunSecuritySetting: %s, '
                     'fabricMode: %s, portConnection: %s' %
@@ -107,6 +115,8 @@ class HBSDRESTFC(rest.HBSDREST):
                     can_port_schedule):
                 available_compute_ports.append(port)
             self.storage_info['wwns'][port] = wwn
+            if pair_target_ports and port in pair_target_ports:
+                self.storage_info['pair_ports'].append(port)

         if target_ports:
             for port in target_ports:
@@ -118,8 +128,14 @@ class HBSDRESTFC(rest.HBSDREST):
                 self.storage_info['compute_ports'].append(port)

         self.check_ports_info()
-        utils.output_log(MSG.SET_CONFIG_VALUE, object='port-wwn list',
-                         value=self.storage_info['wwns'])
+        if pair_target_ports and not self.storage_info['pair_ports']:
+            msg = self.output_log(
+                MSG.RESOURCE_NOT_FOUND, resource="Pair target ports")
+            self.raise_error(msg)
+        self.output_log(MSG.SET_CONFIG_VALUE, object='pair_target_ports',
+                        value=self.storage_info['pair_ports'])
+        self.output_log(MSG.SET_CONFIG_VALUE, object='port-wwn list',
+                        value=self.storage_info['wwns'])

     def check_param(self):
         """Check parameter values and consistency among them."""
@@ -150,15 +166,15 @@ class HBSDRESTFC(rest.HBSDREST):
                 self.client.add_hba_wwn(port, gid, wwn, no_log=True)
                 registered_wwns.append(wwn)
             except exception.VolumeDriverException as ex:
-                utils.output_log(MSG.ADD_HBA_WWN_FAILED, port=port, gid=gid,
+                self.output_log(MSG.ADD_HBA_WWN_FAILED, port=port, gid=gid,
                                 wwn=wwn)
                 if (self.get_port_scheduler_param() and
                         utils.safe_get_err_code(ex.kwargs.get('errobj'))
                         == rest_api.EXCEED_WWN_MAX):
                     raise ex
         if not registered_wwns:
-            msg = utils.output_log(MSG.NO_HBA_WWN_ADDED_TO_HOST_GRP, port=port,
+            msg = self.output_log(MSG.NO_HBA_WWN_ADDED_TO_HOST_GRP, port=port,
                                   gid=gid)
             self.raise_error(msg)

     def set_target_mode(self, port, gid):
@@ -265,10 +281,12 @@ class HBSDRESTFC(rest.HBSDREST):

         return not_found_count

-    def initialize_connection(self, volume, connector, is_snapshot=False):
+    def initialize_connection(
+            self, volume, connector, is_snapshot=False, lun=None,
+            is_mirror=False):
         """Initialize connection between the server and the volume."""
         conn_info, map_info = super(HBSDRESTFC, self).initialize_connection(
-            volume, connector, is_snapshot)
+            volume, connector, is_snapshot, lun)
         if self.conf.hitachi_zoning_request:
             if (self.get_port_scheduler_param() and
                     not self.is_controller(connector)):
@@ -279,10 +297,11 @@ class HBSDRESTFC(rest.HBSDREST):
                     self._lookup_service)
             if init_targ_map:
                 conn_info['data']['initiator_target_map'] = init_targ_map
-            fczm_utils.add_fc_zone(conn_info)
+            if not is_mirror:
+                fczm_utils.add_fc_zone(conn_info)
         return conn_info

-    def terminate_connection(self, volume, connector):
+    def terminate_connection(self, volume, connector, is_mirror=False):
         """Terminate connection between the server and the volume."""
         conn_info = super(HBSDRESTFC, self).terminate_connection(
             volume, connector)
@@ -293,7 +312,8 @@ class HBSDRESTFC(rest.HBSDREST):
                 self._lookup_service)
             if init_targ_map:
                 conn_info['data']['initiator_target_map'] = init_targ_map
-            fczm_utils.remove_fc_zone(conn_info)
+            if not is_mirror:
+                fczm_utils.remove_fc_zone(conn_info)
         return conn_info

     def _get_wwpns(self, port, hostgroup):
@@ -335,8 +355,8 @@ class HBSDRESTFC(rest.HBSDREST):

         active_hba_ids = list(set(active_hba_ids))
         if not active_hba_ids:
-            msg = utils.output_log(MSG.NO_ACTIVE_WWN, wwn=', '.join(hba_ids),
+            msg = self.output_log(MSG.NO_ACTIVE_WWN, wwn=', '.join(hba_ids),
                                   volume=vol_id)
             self.raise_error(msg)

         active_target_wwns = list(set(active_target_wwns))
@@ -347,7 +367,7 @@ class HBSDRESTFC(rest.HBSDREST):
                 port_wwns += ", "
             port_wwns += ("port, WWN: " + port +
                           ", " + self.storage_info['wwns'][port])
-            msg = utils.output_log(
+            msg = self.output_log(
                 MSG.NO_PORT_WITH_ACTIVE_WWN, port_wwns=port_wwns,
                 volume=vol_id)
             self.raise_error(msg)
@@ -371,17 +391,17 @@ class HBSDRESTFC(rest.HBSDREST):
                         == rest_api.MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST)
                         or (_MSG_EXCEED_HOST_GROUP_MAX
                             in utils.safe_get_message(ex.kwargs.get('errobj')))):
-                    utils.output_log(
+                    self.output_log(
                         MSG.HOST_GROUP_NUMBER_IS_MAXIMUM, port=ports[index])
                 elif (utils.safe_get_err_code(ex.kwargs.get('errobj'))
                         == rest_api.EXCEED_WWN_MAX):
-                    utils.output_log(
+                    self.output_log(
                         MSG.WWN_NUMBER_IS_MAXIMUM, port=ports[index],
                         wwn=", ". join(hba_ids))
                 else:
                     raise ex

-        msg = utils.output_log(
+        msg = self.output_log(
             MSG.HOST_GROUP_OR_WWN_IS_NOT_AVAILABLE, ports=', '.join(ports))
         self.raise_error(msg)

@@ -391,7 +411,7 @@ class HBSDRESTFC(rest.HBSDREST):
         active_ports = []

         if not devmap:
-            msg = utils.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE)
+            msg = self.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE)
             self.raise_error(msg)
         for fabric_name in devmap.keys():
             available_ports = []
@@ -409,7 +429,7 @@ class HBSDRESTFC(rest.HBSDREST):
             if port in available_ports and port in filter_ports:
                 active_ports.append(port)
             elif port not in available_ports and port in filter_ports:
-                utils.output_log(
+                self.output_log(
                     MSG.INVALID_PORT_BY_ZONE_MANAGER, port=port)
         for wwpns in wwpn_groups:
             try:
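A rough sketch of how the new initialize_connection/terminate_connection keywords could be driven for a GAD attachment. Illustrative only: `fc_primary` and `fc_secondary` are assumed driver objects, and reusing the primary LUN on the secondary side is an assumption, not something this diff shows.

    # is_mirror=True suppresses the FC zone add/remove calls; lun= is simply
    # forwarded to the parent class initialize_connection().
    conn_info = fc_primary.initialize_connection(volume, connector)
    primary_lun = conn_info['data']['target_lun']
    fc_secondary.initialize_connection(
        volume, connector, lun=primary_lun, is_mirror=True)

    # Detach in the reverse order; again no zone change for the mirror side.
    fc_secondary.terminate_connection(volume, connector, is_mirror=True)
    fc_primary.terminate_connection(volume, connector)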
@@ -46,20 +46,28 @@ class HBSDRESTISCSI(rest.HBSDREST):
         """Prepare for using the storage."""
         target_ports = self.conf.hitachi_target_ports
         compute_target_ports = self.conf.hitachi_compute_target_ports
+        if hasattr(
+                self.conf,
+                self.driver_info['param_prefix'] + '_rest_pair_target_ports'):
+            pair_target_ports = self.conf.hitachi_rest_pair_target_ports
+        else:
+            pair_target_ports = []

         super(HBSDRESTISCSI, self).connect_storage()
         # The port type must be ISCSI and the port attributes must contain TAR.
         params = {'portType': 'ISCSI',
                   'portAttributes': 'TAR'}
         port_list = self.client.get_ports(params=params)
-        for port in set(target_ports + compute_target_ports):
+        for port in set(target_ports + compute_target_ports +
+                        pair_target_ports):
             if port not in [port_data['portId'] for port_data in port_list]:
-                utils.output_log(
+                self.output_log(
                     MSG.INVALID_PORT, port=port, additional_info='(portType, '
                     'portAttributes): not (ISCSI, TAR)')
         for port_data in port_list:
             port = port_data['portId']
-            if port not in set(target_ports + compute_target_ports):
+            if port not in set(target_ports + compute_target_ports +
+                               pair_target_ports):
                 continue
             has_addr = True
             if not port_data['lunSecuritySetting']:
@@ -70,7 +78,7 @@ class HBSDRESTISCSI(rest.HBSDREST):
                 addr_info = (', ipv4Address: %s, tcpPort: %s' %
                              (ipv4_addr, tcp_port))
             if not port_data['lunSecuritySetting'] or not has_addr:
-                utils.output_log(
+                self.output_log(
                     MSG.INVALID_PORT, port=port,
                     additional_info='portType: %s, lunSecuritySetting: %s%s' %
                     (port_data['portType'], port_data['lunSecuritySetting'],
@@ -82,11 +90,20 @@ class HBSDRESTISCSI(rest.HBSDREST):
             if (compute_target_ports and port in compute_target_ports and
                     has_addr):
                 self.storage_info['compute_ports'].append(port)
+            if pair_target_ports and port in pair_target_ports:
+                self.storage_info['pair_ports'].append(port)

         self.check_ports_info()
-        utils.output_log(MSG.SET_CONFIG_VALUE,
-                         object='port-<IP address:port> list',
-                         value=self.storage_info['portals'])
+        if pair_target_ports and not self.storage_info['pair_ports']:
+            msg = self.output_log(
+                MSG.RESOURCE_NOT_FOUND, resource="Pair target ports")
+            self.raise_error(msg)
+        self.output_log(MSG.SET_CONFIG_VALUE,
+                        object='pair_target_ports',
+                        value=self.storage_info['pair_ports'])
+        self.output_log(MSG.SET_CONFIG_VALUE,
+                        object='port-<IP address:port> list',
+                        value=self.storage_info['portals'])

     def create_target_to_storage(self, port, connector, hba_ids):
         """Create an iSCSI target on the specified port."""
@@ -194,12 +211,19 @@ class HBSDRESTISCSI(rest.HBSDREST):
                 not_found_count += 1
         return not_found_count

-    def initialize_connection(self, volume, connector, is_snapshot=False):
+    def initialize_connection(
+            self, volume, connector, is_snapshot=False, lun=None,
+            is_mirror=False):
         """Initialize connection between the server and the volume."""
         conn_info, map_info = super(HBSDRESTISCSI, self).initialize_connection(
-            volume, connector, is_snapshot)
+            volume, connector, is_snapshot, lun)
         return conn_info

+    def terminate_connection(self, volume, connector, is_mirror=False):
+        """Terminate connection between the server and the volume."""
+        return super(HBSDRESTISCSI, self).terminate_connection(
+            volume, connector)
+
     def get_properties_iscsi(self, targets, multipath):
         """Return iSCSI-specific server-LDEV connection info."""
         if not multipath:
@@ -213,8 +237,8 @@ class HBSDRESTISCSI(rest.HBSDREST):
             target_info = self.client.get_host_grp(port, gid)
             iqn = target_info.get('iscsiName') if target_info else None
             if not iqn:
-                msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
+                msg = self.output_log(MSG.RESOURCE_NOT_FOUND,
                                       resource='Target IQN')
                 self.raise_error(msg)
             targets['iqns'][target] = iqn
             LOG.debug(
@@ -15,17 +15,17 @@
 """Utility module for Hitachi HBSD Driver."""

 import enum
+import functools
 import logging as base_logging

-from oslo_config import cfg
 from oslo_log import log as logging
-from oslo_utils import excutils
 from oslo_utils import timeutils
 from oslo_utils import units

 from cinder import exception
+from cinder import utils as cinder_utils

-VERSION = '2.3.2'
+VERSION = '2.3.3'
 CI_WIKI_NAME = 'Hitachi_VSP_CI'
 PARAM_PREFIX = 'hitachi'
 VENDOR_NAME = 'Hitachi'
@@ -38,9 +38,13 @@ HDT_VOL_ATTR = 'HDT'
 NVOL_LDEV_TYPE = 'DP-VOL'
 TARGET_IQN_SUFFIX = '.hbsd-target'
 PAIR_ATTR = 'HTI'
+MIRROR_ATTR = 'GAD'

 GIGABYTE_PER_BLOCK_SIZE = units.Gi / 512

+PRIMARY_STR = 'primary'
+SECONDARY_STR = 'secondary'
+
 NORMAL_LDEV_TYPE = 'Normal'

 FULL = 'Full copy'
@@ -202,6 +206,20 @@ class HBSDMsg(enum.Enum):
                '(port: %(port)s, WWN: %(wwn)s)',
         'suffix': WARNING_SUFFIX,
     }
+    REPLICATION_VOLUME_OPERATION_FAILED = {
+        'msg_id': 337,
+        'loglevel': base_logging.WARNING,
+        'msg': 'Failed to %(operation)s the %(type)s in a replication pair. '
+               '(volume: %(volume_id)s, reason: %(reason)s)',
+        'suffix': WARNING_SUFFIX,
+    }
+    SITE_INITIALIZATION_FAILED = {
+        'msg_id': 338,
+        'loglevel': base_logging.WARNING,
+        'msg': 'Failed to initialize the driver for the %(site)s storage '
+               'system.',
+        'suffix': WARNING_SUFFIX,
+    }
     INVALID_PORT = {
         'msg_id': 339,
         'loglevel': base_logging.WARNING,
@@ -301,6 +319,19 @@ class HBSDMsg(enum.Enum):
         'msg': 'Failed to add the logical device.',
         'suffix': ERROR_SUFFIX,
     }
+    PAIR_TARGET_FAILED = {
+        'msg_id': 638,
+        'loglevel': base_logging.ERROR,
+        'msg': 'Failed to add the pair target.',
+        'suffix': ERROR_SUFFIX,
+    }
+    MAP_PAIR_TARGET_FAILED = {
+        'msg_id': 639,
+        'loglevel': base_logging.ERROR,
+        'msg': 'Failed to map a logical device to any pair targets. '
+               '(LDEV: %(ldev)s)',
+        'suffix': ERROR_SUFFIX,
+    }
     POOL_NOT_FOUND = {
         'msg_id': 640,
         'loglevel': base_logging.ERROR,
@@ -391,11 +422,18 @@ class HBSDMsg(enum.Enum):
                'This driver does not support unmanaging snapshots.',
         'suffix': ERROR_SUFFIX,
     }
+    INVALID_EXTRA_SPEC_KEY = {
+        'msg_id': 723,
+        'loglevel': base_logging.ERROR,
+        'msg': 'Failed to create a volume. '
+               'An invalid value is specified for the extra spec key '
+               '"%(key)s" of the volume type. (value: %(value)s)',
+        'suffix': ERROR_SUFFIX,
+    }
     VOLUME_COPY_FAILED = {
         'msg_id': 725,
         'loglevel': base_logging.ERROR,
-        'msg': 'Failed to copy a volume. (copy method: %(copy_method)s, '
-               'P-VOL: %(pvol)s, S-VOL: %(svol)s)',
+        'msg': 'Failed to copy a volume. (P-VOL: %(pvol)s, S-VOL: %(svol)s)',
         'suffix': ERROR_SUFFIX
     }
     REST_SERVER_CONNECT_FAILED = {
@@ -482,6 +520,61 @@ class HBSDMsg(enum.Enum):
                'resource of host group or wwn was found. (ports: %(ports)s)',
         'suffix': ERROR_SUFFIX,
     }
+    SITE_NOT_INITIALIZED = {
+        'msg_id': 751,
+        'loglevel': base_logging.ERROR,
+        'msg': 'The driver is not initialized for the %(site)s storage '
+               'system.',
+        'suffix': ERROR_SUFFIX,
+    }
+    CREATE_REPLICATION_VOLUME_FAILED = {
+        'msg_id': 752,
+        'loglevel': base_logging.ERROR,
+        'msg': 'Failed to create the %(type)s for a %(rep_type)s pair. '
+               '(volume: %(volume_id)s, volume type: %(volume_type)s, '
+               'size: %(size)s)',
+        'suffix': ERROR_SUFFIX,
+    }
+    CREATE_REPLICATION_PAIR_FAILED = {
+        'msg_id': 754,
+        'loglevel': base_logging.ERROR,
+        'msg': 'Failed to create a %(rep_type)s pair or '
+               'to mirror data in a %(rep_type)s pair. '
+               '(P-VOL: %(pvol)s, S-VOL: %(svol)s, copy group: '
+               '%(copy_group)s, pair status: %(status)s)',
+        'suffix': ERROR_SUFFIX,
+    }
+    SPLIT_REPLICATION_PAIR_FAILED = {
+        'msg_id': 755,
+        'loglevel': base_logging.ERROR,
+        'msg': 'Failed to split a %(rep_type)s pair. '
+               '(P-VOL: %(pvol)s, S-VOL: %(svol)s, '
+               'copy group: %(copy_group)s, pair status: %(status)s)',
+        'suffix': ERROR_SUFFIX,
+    }
+    PAIR_CHANGE_TIMEOUT = {
+        'msg_id': 756,
+        'loglevel': base_logging.ERROR,
+        'msg': 'A timeout occurred before the status of '
+               'the %(rep_type)s pair changes. '
+               '(P-VOL: %(pvol)s, S-VOL: %(svol)s, copy group: '
+               '%(copy_group)s, current status: %(current_status)s, '
+               'expected status: %(expected_status)s, timeout: %(timeout)s '
+               'seconds)',
+        'suffix': ERROR_SUFFIX,
+    }
+    EXTEND_REPLICATION_VOLUME_ERROR = {
+        'msg_id': 758,
+        'loglevel': base_logging.ERROR,
+        'msg': 'Failed to extend a volume. The LDEVs for the volume are in '
+               'a %(rep_type)s pair and the volume is attached. '
+               '(volume: %(volume_id)s, '
+               'LDEV: %(ldev)s, source size: %(source_size)s, destination '
+               'size: %(destination_size)s, P-VOL: %(pvol)s, S-VOL: %(svol)s, '
+               'P-VOL[numOfPorts]: %(pvol_num_of_ports)s, '
+               'S-VOL[numOfPorts]: %(svol_num_of_ports)s)',
+        'suffix': ERROR_SUFFIX,
+    }
     MIGRATE_VOLUME_FAILED = {
         'msg_id': 760,
         'loglevel': base_logging.ERROR,
@@ -490,6 +583,21 @@ class HBSDMsg(enum.Enum):
                '(P-VOL, S-VOL, copy method, status): %(pair_info)s)',
         'suffix': ERROR_SUFFIX,
     }
+    REPLICATION_PAIR_ERROR = {
+        'msg_id': 766,
+        'loglevel': base_logging.ERROR,
+        'msg': 'Failed to %(operation)s. The LDEV for the volume is in '
+               'a remote replication pair. (volume: %(volume)s, '
+               '%(snapshot_info)sLDEV: %(ldev)s)',
+        'suffix': ERROR_SUFFIX,
+    }
+    LDEV_NUMBER_NOT_FOUND = {
+        'msg_id': 770,
+        'loglevel': base_logging.ERROR,
+        'msg': 'Failed to %(operation)s. The LDEV number is not found in the '
+               'Cinder object. (%(obj)s: %(obj_id)s)',
+        'suffix': ERROR_SUFFIX,
+    }

     def __init__(self, error_info):
         """Initialize Enum attributes."""
@@ -498,48 +606,36 @@ class HBSDMsg(enum.Enum):
         self.msg = error_info['msg']
         self.suffix = error_info['suffix']

-    def output_log(self, **kwargs):
+    def output_log(self, storage_id, **kwargs):
         """Output the message to the log file and return the message."""
         msg = self.msg % kwargs
-        LOG.log(self.level, "MSGID%(msg_id)04d-%(msg_suffix)s: %(msg)s",
+        if storage_id:
+            LOG.log(
+                self.level,
+                "%(storage_id)s MSGID%(msg_id)04d-%(msg_suffix)s: %(msg)s",
+                {'storage_id': storage_id[-6:], 'msg_id': self.msg_id,
+                 'msg_suffix': self.suffix, 'msg': msg})
+        else:
+            LOG.log(
+                self.level, "MSGID%(msg_id)04d-%(msg_suffix)s: %(msg)s",
                 {'msg_id': self.msg_id, 'msg_suffix': self.suffix, 'msg': msg})
         return msg


-def output_log(msg_enum, **kwargs):
+def output_log(msg_enum, storage_id=None, **kwargs):
     """Output the specified message to the log file and return the message."""
-    return msg_enum.output_log(**kwargs)
+    return msg_enum.output_log(storage_id, **kwargs)


 LOG = logging.getLogger(__name__)
 MSG = HBSDMsg


-def get_ldev(obj):
-    """Get the LDEV number from the given object and return it as integer."""
-    if not obj:
-        return None
-    ldev = obj.get('provider_location')
-    if not ldev or not ldev.isdigit():
-        return None
-    return int(ldev)
-
-
 def timed_out(start_time, timeout):
     """Check if the specified time has passed."""
     return timeutils.is_older_than(start_time, timeout)


-def check_opt_value(conf, names):
-    """Check if the parameter names and values are valid."""
-    for name in names:
-        try:
-            getattr(conf, name)
-        except (cfg.NoSuchOptError, cfg.ConfigFileValueError):
-            with excutils.save_and_reraise_exception():
-                output_log(MSG.INVALID_PARAMETER, param=name)
-
-
 def build_initiator_target_map(connector, target_wwns, lookup_service):
     """Return a dictionary mapping server-wwns and lists of storage-wwns."""
     init_targ_map = {}
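With the change above, a message can be prefixed with the last six characters of the storage device ID, which is how the replication code tells the primary and secondary systems apart in the log. A small sketch, assuming the usual import alias for this utility module; the storage ID is made up and the 'W' suffix assumes WARNING_SUFFIX is 'W':

    from cinder.volume.drivers.hitachi import hbsd_utils as utils

    # Without a storage ID the format is unchanged:
    utils.output_log(utils.MSG.INVALID_PORT, port='CL1-A',
                     additional_info='portAttributes: not TAR')
    # -> "MSGID0339-W: <message body>"

    # With a storage ID (as RestApiClient.output_log passes when is_rep is set):
    utils.output_log(utils.MSG.INVALID_PORT, storage_id='886000123456',
                     port='CL1-A', additional_info='portAttributes: not TAR')
    # -> "123456 MSGID0339-W: <message body>"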
@@ -614,3 +710,52 @@ def get_exception_msg(exc):
             exc, exception.CinderException) else exc.args[0]
     else:
         return ""
+
+
+def synchronized_on_copy_group():
+    def wrap(func):
+        @functools.wraps(func)
+        def inner(self, remote_client, copy_group_name, *args, **kwargs):
+            sync_key = '%s-%s' % (copy_group_name,
+                                  self.storage_id[-6:])
+
+            @cinder_utils.synchronized(sync_key, external=True)
+            def _inner():
+                return func(self, remote_client, copy_group_name,
+                            *args, **kwargs)
+            return _inner()
+        return inner
+    return wrap
+
+
+DICT = '_dict'
+CONF = '_conf'
+
+
+class Config(object):
+
+    def __init__(self, conf):
+        super().__setattr__(CONF, conf)
+        super().__setattr__(DICT, dict())
+        self._opts = {}
+
+    def __getitem__(self, name):
+        return (super().__getattribute__(DICT)[name]
+                if name in super().__getattribute__(DICT)
+                else super().__getattribute__(CONF).safe_get(name))
+
+    def __getattr__(self, name):
+        return (super().__getattribute__(DICT)[name]
+                if name in super().__getattribute__(DICT)
+                else getattr(super().__getattribute__(CONF), name))
+
+    def __setitem__(self, key, value):
+        super().__getattribute__(DICT)[key] = value
+
+    def __setattr__(self, key, value):
+        self.__setitem__(key, value)
+
+    def safe_get(self, name):
+        return (super().__getattribute__(DICT)[name]
+                if name in super().__getattribute__(DICT)
+                else super().__getattribute__(CONF).safe_get(name))
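The Config wrapper added here lets the replication code keep per-site option overrides (for example, values for the secondary storage) without modifying the shared oslo.config object: assignments land in a private dict, and reads fall back to the wrapped conf. A minimal sketch with made-up option values; `backend_conf` is an assumed oslo.config-style object:

    local_conf = Config(backend_conf)
    local_conf.hitachi_pools = ['gad_pool']          # stored in the wrapper only
    local_conf['hitachi_target_ports'] = ['CL1-A']   # item syntax behaves the same

    local_conf.hitachi_pools          # -> ['gad_pool'], from the wrapper
    local_conf.safe_get('san_ip')     # falls through to backend_conf.safe_get('san_ip')

The synchronized_on_copy_group() decorator serializes pair operations per copy group: the external lock name combines the copy group name with the last six characters of the client's storage_id, so split/resync/delete calls against the same copy group on the same array never overlap.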
@@ -0,0 +1,11 @@
+---
+features:
+  - |
+    Hitachi driver: Support Global-Active Device (GAD) volume.
+    GAD is one of the Hitachi storage functions; it uses volume replication
+    to provide a high-availability environment for hosts across storage
+    systems and sites. New properties are added to the configuration.
+    Setting ``hbsd:topology`` to ``active_active_mirror_volume``
+    specifies a GAD volume, and the ``hitachi_mirror_xxx`` parameters
+    specify the secondary storage used for GAD volumes.
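As the note says, a GAD volume is requested through the volume type. A minimal illustration of the extra spec involved (how the volume type is created, and the concrete ``hitachi_mirror_*`` option names for the secondary storage, are outside this release note):

    # Volume type extra spec that requests a GAD (mirrored) volume.
    extra_specs = {'hbsd:topology': 'active_active_mirror_volume'}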