Merge "[NetApp] Sync mirror support for NetApp Backends"
commit e0c9a012b0
@@ -1352,6 +1352,8 @@ SM_SOURCE_VSERVER = 'fake_source_vserver'
SM_SOURCE_VOLUME = 'fake_source_volume'
SM_DEST_VSERVER = 'fake_destination_vserver'
SM_DEST_VOLUME = 'fake_destination_volume'
SM_SOURCE_CG = 'fake_source_cg'
SM_DESTINATION_CG = 'fake_destination_cg'
IGROUP_NAME = 'openstack-d9b4194f-5f65-4952-fake-26c911f1e4b2'
LUN_NAME_PATH = '/vol/volume-fake/lun-path-fake-1234'
@@ -3570,10 +3570,12 @@ class NetAppCmodeClientTestCase(test.TestCase):
    @ddt.unpack
    def test_create_snapmirror(self, schedule, policy):
        self.mock_object(self.client.connection, 'send_request')

        fake_client.SM_SOURCE_CG = None
        fake_client.SM_DESTINATION_CG = None
        self.client.create_snapmirror(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME,
            fake_client.SM_SOURCE_CG, fake_client.SM_DESTINATION_CG,
            schedule=schedule, policy=policy)

        snapmirror_create_args = {
@@ -2834,6 +2834,48 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
        self.client.send_request.assert_has_calls([
            mock.call('/snapmirror/relationships/', 'post', body=body)])

    @ddt.data({'policy': 'AutomatedFailOver'})
    @ddt.unpack
    def test_create_snapmirror_active_sync(self, policy):
        """Tests creation of snapmirror with active sync"""
        api_responses = [
            {
                "job": {
                    "uuid": fake_client.FAKE_UUID,
                },
            },
        ]
        self.mock_object(self.client, 'send_request',
                         side_effect=copy.deepcopy(api_responses))
        self.client.create_snapmirror(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME,
            fake_client.SM_SOURCE_CG, fake_client.SM_DESTINATION_CG,
            policy=policy)

        if fake_client.SM_SOURCE_VSERVER is not None and \
                fake_client.SM_SOURCE_CG is not None:
            body = {
                'source': {
                    'path':
                        fake_client.SM_SOURCE_VSERVER + ':/cg/' +
                        fake_client.SM_SOURCE_CG,
                    'consistency_group_volumes': [
                        {'name': fake_client.SM_SOURCE_VOLUME}]
                },
                'destination': {
                    'path': fake_client.SM_DEST_VSERVER + ':/cg/' +
                            fake_client.SM_DESTINATION_CG,
                    'consistency_group_volumes': [
                        {'name': fake_client.SM_DEST_VOLUME}]
                }
            }
            if policy:
                body['policy'] = {'name': policy}
            if body is not None:
                self.client.send_request.assert_has_calls([
                    mock.call('/snapmirror/relationships/', 'post', body=body)])

    def test_create_snapmirror_already_exists(self):
        api_responses = netapp_api.NaApiError(
            code=netapp_api.REST_ERELATION_EXISTS)
@@ -2866,8 +2908,35 @@ class NetAppRestCmodeClientTestCase(test.TestCase):
            relationship_type='data_protection')
        self.assertTrue(self.client.send_request.called)

    def test__set_snapmirror_state(self):
    def test_create_ontap_consistency_group(self):
        """Tests creation of consistency group for active sync policies"""
        api_responses = [
            {
                "job": {
                    "uuid": fake_client.FAKE_UUID,
                },
            },
        ]
        self.mock_object(self.client, 'send_request',
                         side_effect=copy.deepcopy(api_responses))
        self.client.create_ontap_consistency_group(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_SOURCE_CG)

        body = {
            'svm': {
                'name': fake_client.SM_SOURCE_VSERVER
            },
            'name': fake_client.SM_SOURCE_CG,
            'volumes': [{
                'name': fake_client.SM_SOURCE_VOLUME,
                "provisioning_options": {"action": "add"}
            }]
        }
        self.client.send_request.assert_has_calls([
            mock.call('/application/consistency-groups/', 'post', body=body)])

    def test__set_snapmirror_state(self):
        api_responses = [
            fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST,
            {
@@ -258,11 +258,11 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
        mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
                                                  'iscsi')
        self.zapi_client.map_lun.return_value = '1'

        self.mock_object(self.library, '_is_active_sync_configured',
                         return_value=False)
        lun_id = self.library._map_lun('fake_volume',
                                       fake.FC_FORMATTED_INITIATORS,
                                       protocol, None)

        self.assertEqual('1', lun_id)
        mock_get_or_create_igroup.assert_called_once_with(
            fake.FC_FORMATTED_INITIATORS, protocol, os)
@@ -281,6 +281,8 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
        mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
        mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
                                                  'iscsi')
        self.mock_object(self.library, '_is_active_sync_configured',
                         return_value=False)
        self.library._map_lun('fake_volume',
                              fake.FC_FORMATTED_INITIATORS,
                              protocol, None)
@@ -306,7 +308,8 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
                                                  'iscsi')
        mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, '2')
        self.zapi_client.map_lun.side_effect = netapp_api.NaApiError

        self.mock_object(self.library, '_is_active_sync_configured',
                         return_value=False)
        lun_id = self.library._map_lun(
            'fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None)
@@ -121,6 +121,51 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
        mock_get_ontap_version.assert_called_once_with(cached=False)
        mock_get_cluster_nodes_info.assert_called_once_with()

    @ddt.data(fake.AFF_SYSTEM_NODES_INFO,
              fake.FAS_SYSTEM_NODES_INFO,
              fake.HYBRID_SYSTEM_NODES_INFO)
    @mock.patch.object(client_base.Client, 'get_ontap_version',
                       return_value='9.6')
    @mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock())
    @mock.patch.object(client_base.Client, 'get_ontapi_version',
                       mock.MagicMock(return_value=(1, 20)))
    @mock.patch.object(capabilities.CapabilitiesLibrary,
                       'cluster_user_supported')
    @mock.patch.object(capabilities.CapabilitiesLibrary,
                       'check_api_permissions')
    @mock.patch.object(na_utils, 'check_flags')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup')
    def test_do_setup_with_replication(self, cluster_nodes_info,
                                       super_do_setup, mock_check_flags,
                                       mock_check_api_permissions,
                                       mock_cluster_user_supported,
                                       mock_get_ontap_version):
        """Tests setup method when replication is enabled"""
        self.mock_object(client_base.Client, '_init_ssh_client')
        mock_get_cluster_nodes_info = self.mock_object(
            client_cmode.Client, '_get_cluster_nodes_info',
            return_value=cluster_nodes_info)
        self.mock_object(
            dot_utils, 'get_backend_configuration',
            return_value=self.get_config_cmode())
        context = mock.Mock()
        self.replication_enabled = True
        self.replication_policy = "AutomatedFailOver"
        self.replication_backends = ['target_1', 'target_2']
        self.mock_object(self.library, 'get_replication_backend_names',
                         return_value=self.replication_backends)
        self.mock_object(self.library, 'get_replication_policy',
                         return_value=self.replication_policy)

        self.library.do_setup(context)

        super_do_setup.assert_called_once_with(context)
        self.assertEqual(1, mock_check_flags.call_count)
        mock_check_api_permissions.assert_called_once_with()
        mock_cluster_user_supported.assert_called_once_with()
        mock_get_ontap_version.assert_called_once_with(cached=False)
        mock_get_cluster_nodes_info.assert_called_once_with()

    def test_check_for_setup_error(self):
        super_check_for_setup_error = self.mock_object(
            block_base.NetAppBlockStorageLibrary, 'check_for_setup_error')
@@ -17,10 +17,11 @@ from unittest import mock
import ddt
from oslo_config import cfg

from cinder import exception
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.tests.unit import utils as test_utils
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as\
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as \
    dataontap_fakes
from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes
from cinder.volume import configuration
@@ -50,6 +51,10 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
        self.mock_cmode_client = self.mock_object(client_cmode, 'Client')
        self.src_flexvol_name = 'volume_c02d497a_236c_4852_812a_0d39373e312a'
        self.dest_flexvol_name = self.src_flexvol_name
        self.src_cg = ''
        self.dest_cg = ''
        self.active_sync_policy = False
        self.replication_policy = 'MirrorAllSnapshots'
        self.mock_src_client = mock.Mock()
        self.mock_dest_client = mock.Mock()
        self.config = fakes.get_fake_cmode_config(self.src_backend)
@@ -199,7 +204,8 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
        self.dm_mixin.create_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)
                                        self.dest_flexvol_name,
                                        self.replication_policy)

        if not dest_exists:
            create_destination_flexvol.assert_called_once_with(
@@ -207,16 +213,20 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
                self.dest_flexvol_name, pool_is_flexgroup=is_flexgroup)
        else:
            self.assertFalse(create_destination_flexvol.called)
        sync_mirror = False
        mock_dest_client.create_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name,
            self.src_cg,
            self.dest_cg,
            schedule='hourly',
            policy=self.replication_policy,
            relationship_type=('extended_data_protection'
                               if is_flexgroup
                               if is_flexgroup or sync_mirror
                               else 'data_protection'))
        mock_dest_client.initialize_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name)
            self.dest_flexvol_name, self.active_sync_policy)

    def test_create_snapmirror_cleanup_on_geometry_has_changed(self):
        mock_dest_client = mock.Mock()
@@ -254,17 +264,19 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
                                        self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)
                                        self.dest_flexvol_name,
                                        self.replication_policy)

        self.assertFalse(create_destination_flexvol.called)
        mock_dest_client.create_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name, schedule='hourly',
            self.dest_flexvol_name, self.src_cg, self.dest_cg,
            schedule='hourly', policy=self.replication_policy,
            relationship_type='data_protection')

        mock_dest_client.initialize_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name)
            self.dest_flexvol_name, self.active_sync_policy)

        mock_delete_snapshot.assert_called_once_with(
            self.src_backend, self.dest_backend, self.src_flexvol_name,
@@ -285,7 +297,8 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
        self.dm_mixin.create_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)
                                        self.dest_flexvol_name,
                                        self.replication_policy)

        self.assertFalse(mock_dest_client.create_snapmirror.called)
        self.assertFalse(mock_dest_client.initialize_snapmirror.called)
@@ -320,7 +333,8 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
        self.dm_mixin.create_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)
                                        self.dest_flexvol_name,
                                        self.replication_policy)

        self.assertFalse(mock_dest_client.create_snapmirror.called)
        self.assertFalse(mock_dest_client.initialize_snapmirror.called)
@@ -885,13 +899,13 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
        self.mock_object(self.dm_mixin, 'create_snapmirror')
        expected_calls = [
            mock.call(self.src_backend, replication_backends[0],
                      flexvols[0], flexvols[0]),
                      flexvols[0], flexvols[0], self.replication_policy),
            mock.call(self.src_backend, replication_backends[0],
                      flexvols[1], flexvols[1]),
                      flexvols[1], flexvols[1], self.replication_policy),
            mock.call(self.src_backend, replication_backends[1],
                      flexvols[0], flexvols[0]),
                      flexvols[0], flexvols[0], self.replication_policy),
            mock.call(self.src_backend, replication_backends[1],
                      flexvols[1], flexvols[1]),
                      flexvols[1], flexvols[1], self.replication_policy),
        ]

        retval = self.dm_mixin.ensure_snapmirrors(self.mock_src_config,
@@ -923,7 +937,7 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):

        excepted_call = mock.call(
            self.src_backend, replication_backends[0],
            flexvols[0], flexvols[0])
            flexvols[0], flexvols[0], self.replication_policy)
        self.dm_mixin.create_snapmirror.assert_has_calls([
            excepted_call, excepted_call, excepted_call
        ])
@@ -1027,6 +1041,287 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
        self.assertEqual('fallback2', target)
        self.assertFalse(mock_debug_log.called)

    def test__failover_host_to_same_host(self):
        """Tests failover host to same host throws error"""
        # Mock the required attributes
        self.dm_mixin.backend_name = "backend1"
        secondary_id = "backend1"
        volumes = []
        # Assert that an exception is raised
        self.assertRaises(exception.InvalidReplicationTarget,
                          self.dm_mixin._failover_host, volumes, secondary_id)

    def test__failover_host_to_default(self):
        """Tests failover host to default sets the old primary as a new
        primary"""
        # Mock the required attributes
        self.dm_mixin.backend_name = "backend1"
        secondary_id = "default"
        volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}]

        # Mock the necessary methods
        self.dm_mixin._update_zapi_client = mock.Mock()
        self.get_replication_backend_names = mock.Mock(return_value=
                                                        ["backend1"])

        # Call the method
        result = self.dm_mixin._failover_host(volumes, secondary_id)

        # Assert the expected result
        expected_result = ("backend1",
                           [{'volume_id': 'volume1',
                             'updates': {'replication_status': 'enabled'}}],
                           [])
        self.assertEqual(result, expected_result)
        self.assertTrue(self.dm_mixin._update_zapi_client.called)

    def test__failover_host_to_custom_host(self):
        """Tests failover host to custom host sets the secondary as a new
        primary"""
        # Mock the required attributes
        self.dm_mixin.backend_name = "backend1"
        secondary_id = "backend2"
        volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}]

        # Mock the necessary methods
        self.dm_mixin._complete_failover = \
            mock.Mock(return_value=
                      ("backend2", [{'volume_id': 'volume1',
                                     'updates':
                                     {'replication_status': 'enabled'}}]))
        self.dm_mixin._update_zapi_client = mock.Mock()
        self.dm_mixin.configuration = self.config
        self.dm_mixin.get_replication_backend_names = \
            mock.Mock(return_value=["backend1", "backend2"])
        self.mock_object(utils, 'get_backend_configuration')
        volume_list = ['pool1', 'vol1', 'vol2']
        self.dm_mixin.ssc_library = mock.Mock()
        self.mock_object(self.dm_mixin.ssc_library,
                         'get_ssc_flexvol_names', return_value=volume_list)

        # Call the method
        result = self.dm_mixin._failover_host(volumes, secondary_id)

        # Assert the expected result
        expected_result = ("backend2",
                           [{'volume_id': 'volume1',
                             'updates': {'replication_status': 'enabled'}}],
                           [])
        self.assertEqual(result, expected_result)
        self.assertTrue(self.dm_mixin._complete_failover.called)
        self.assertTrue(self.dm_mixin._update_zapi_client.called)
    def test__failover_host_without_replication_targets(self):
        """Tests failover host to a target which doesn't exist"""
        # Mock the required attributes
        self.dm_mixin.backend_name = "backend1"
        secondary_id = "backend2"
        volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}]

        # Mock the necessary methods
        self.dm_mixin._complete_failover = \
            mock.Mock(return_value=("backend2",
                                    [{'volume_id': 'volume1',
                                      'updates':
                                      {'replication_status': 'enabled'}}]))
        self.dm_mixin._update_zapi_client = mock.Mock()
        self.dm_mixin.configuration = self.config
        self.dm_mixin.get_replication_backend_names = \
            mock.Mock(return_value=[])
        self.mock_object(utils, 'get_backend_configuration')
        self.dm_mixin.host = "host1"
        # Assert that an exception is raised
        self.assertRaises(exception.InvalidReplicationTarget,
                          self.dm_mixin._failover_host, volumes, secondary_id)

    def test__failover_host_secondary_id_not_in_replication_target(self):
        """Tests failover host to custom host whose id is not there in the
        replication target list"""
        # Mock the required attributes
        self.dm_mixin.backend_name = "backend1"
        secondary_id = "backend3"
        volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}]

        # Mock the necessary methods
        self.dm_mixin._complete_failover = \
            mock.Mock(return_value=("backend2",
                                    [{'volume_id': 'volume1',
                                      'updates':
                                      {'replication_status': 'enabled'}}]))
        self.dm_mixin._update_zapi_client = mock.Mock()
        self.dm_mixin.configuration = self.config
        self.dm_mixin.get_replication_backend_names = \
            mock.Mock(return_value=["backend1", "backend2"])
        self.mock_object(utils, 'get_backend_configuration')
        self.dm_mixin.host = "host1"

        # Assert that an exception is raised
        self.assertRaises(exception.InvalidReplicationTarget,
                          self.dm_mixin._failover_host, volumes, secondary_id)

    def test__failover_host_no_suitable_target(self):
        """Tests failover host to a host which is not a suitable secondary"""
        # Mock the required attributes
        self.dm_mixin.backend_name = "backend1"
        secondary_id = "backend2"
        volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}]

        # Mock the necessary methods
        self.mock_object(data_motion.DataMotionMixin, '_complete_failover',
                         side_effect=na_utils.NetAppDriverException)
        self.dm_mixin.configuration = self.config
        self.dm_mixin.get_replication_backend_names = \
            mock.Mock(return_value=["backend1", "backend2"])
        self.mock_object(utils, 'get_backend_configuration')
        volume_list = ['pool1', 'vol1', 'vol2']
        self.dm_mixin.ssc_library = mock.Mock()
        self.mock_object(self.dm_mixin.ssc_library, 'get_ssc_flexvol_names',
                         return_value=volume_list)

        # Assert that an exception is raised
        self.assertRaises(exception.UnableToFailOver,
                          self.dm_mixin._failover_host, volumes, secondary_id)
    def test__failover_to_same_host(self):
        """Tests failover to same host throws error"""
        # Mock the required attributes
        self.dm_mixin.backend_name = "backend1"
        secondary_id = "backend1"
        volumes = []

        # Assert that an exception is raised
        self.assertRaises(exception.InvalidReplicationTarget,
                          self.dm_mixin._failover, 'fake_context',
                          volumes, secondary_id)

    def test__failover_to_default(self):
        """Tests failover to default sets the old primary as a new primary"""
        # Mock the required attributes
        self.dm_mixin.backend_name = "backend1"
        secondary_id = "default"
        volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}]

        # Mock the necessary methods
        self.dm_mixin._update_zapi_client = mock.Mock()
        self.get_replication_backend_names = \
            mock.Mock(return_value=["backend1"])
        # Call the method
        result = self.dm_mixin._failover('fake_context', volumes,
                                         secondary_id)
        # Assert the expected result
        expected_result = ("backend1",
                           [{'volume_id': 'volume1',
                             'updates': {'replication_status': 'enabled'}}],
                           [])
        self.assertEqual(result, expected_result)
        self.assertTrue(self.dm_mixin._update_zapi_client.called)

    def test__failover_to_custom_host(self):
        """Tests failover to custom host sets the secondary as a new
        primary"""
        # Mock the required attributes
        self.dm_mixin.backend_name = "backend1"
        secondary_id = "backend2"
        volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}]

        # Mock the necessary methods
        self.dm_mixin._complete_failover = \
            mock.Mock(return_value=("backend2",
                                    [{'volume_id': 'volume1',
                                      'updates':
                                      {'replication_status': 'enabled'}}]))
        self.dm_mixin.configuration = self.config
        self.dm_mixin.get_replication_backend_names = \
            mock.Mock(return_value=["backend1", "backend2"])
        self.mock_object(utils, 'get_backend_configuration')
        volume_list = ['pool1', 'vol1', 'vol2']
        self.dm_mixin.ssc_library = mock.Mock()
        self.mock_object(self.dm_mixin.ssc_library,
                         'get_ssc_flexvol_names', return_value=volume_list)

        # Call the method
        result = self.dm_mixin._failover('fake_context', volumes,
                                         secondary_id)
        # Assert the expected result
        expected_result = ("backend2",
                           [{'volume_id': 'volume1',
                             'updates': {'replication_status': 'enabled'}}],
                           [])
        self.assertEqual(result, expected_result)
        self.assertTrue(self.dm_mixin._complete_failover.called)
    def test__failover_without_replication_targets(self):
        """Tests failover to a target which doesn't exist"""
        # Mock the required attributes
        self.dm_mixin.backend_name = "backend1"
        secondary_id = "backend2"
        volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}]

        # Mock the necessary methods
        self.dm_mixin._complete_failover = \
            mock.Mock(return_value=("backend2",
                                    [{'volume_id': 'volume1',
                                      'updates':
                                      {'replication_status': 'enabled'}}]))
        self.dm_mixin._update_zapi_client = mock.Mock()
        self.dm_mixin.configuration = self.config
        self.dm_mixin.get_replication_backend_names = \
            mock.Mock(return_value=[])
        self.mock_object(utils, 'get_backend_configuration')
        self.dm_mixin.host = "host1"

        # Assert that an exception is raised
        self.assertRaises(exception.InvalidReplicationTarget,
                          self.dm_mixin._failover, 'fake_context',
                          volumes, secondary_id)

    def test__failover_secondary_id_not_in_replication_target(self):
        """Tests failover to custom host whose id is not there in the
        replication target list"""
        # Mock the required attributes
        self.dm_mixin.backend_name = "backend1"
        secondary_id = "backend3"
        volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}]

        # Mock the necessary methods
        self.dm_mixin._complete_failover = \
            mock.Mock(return_value=("backend2",
                                    [{'volume_id': 'volume1',
                                      'updates':
                                      {'replication_status': 'enabled'}}]))
        self.dm_mixin._update_zapi_client = mock.Mock()
        self.dm_mixin.configuration = self.config
        self.dm_mixin.get_replication_backend_names = \
            mock.Mock(return_value=["backend1", "backend2"])
        self.mock_object(utils, 'get_backend_configuration')
        self.dm_mixin.host = "host1"

        # Assert that an exception is raised
        self.assertRaises(exception.InvalidReplicationTarget,
                          self.dm_mixin._failover, 'fake_context',
                          volumes, secondary_id)

    def test__failover_no_suitable_target(self):
        """Tests failover to a host which is not a suitable secondary"""
        # Mock the required attributes
        self.dm_mixin.backend_name = "backend1"
        secondary_id = "backend2"
        volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}]
        self.mock_object(data_motion.DataMotionMixin, '_complete_failover',
                         side_effect=na_utils.NetAppDriverException)
        self.dm_mixin.configuration = self.config
        self.dm_mixin.get_replication_backend_names = \
            mock.Mock(return_value=["backend1", "backend2"])
        self.mock_object(utils, 'get_backend_configuration')
        volume_list = ['pool1', 'vol1', 'vol2']
        self.dm_mixin.ssc_library = mock.Mock()
        self.mock_object(self.dm_mixin.ssc_library,
                         'get_ssc_flexvol_names', return_value=volume_list)
        # Assert that an exception is raised
        self.assertRaises(exception.UnableToFailOver,
                          self.dm_mixin._failover, 'fake_context',
                          volumes, secondary_id)

    def test__complete_failover_no_suitable_target(self):
        flexvols = ['nvol1', 'nvol2']
        replication_backends = ['fallback1', 'fallback2']
@@ -69,7 +69,8 @@ class NetAppLun(object):


class NetAppBlockStorageLibrary(
        object, metaclass=volume_utils.TraceWrapperMetaclass):
        object,
        metaclass=volume_utils.TraceWrapperMetaclass):
    """NetApp block storage library for Data ONTAP."""

    # do not increment this as it may be used in volume type definitions
@@ -94,6 +95,7 @@ class NetAppBlockStorageLibrary(
        self.driver_name = driver_name
        self.driver_protocol = driver_protocol
        self.zapi_client = None
        self.dest_zapi_client = None
        self._stats = {}
        self.lun_table = {}
        self.lun_ostype = None
@@ -440,7 +442,10 @@ class NetAppBlockStorageLibrary(
                  " host OS.",
                  {'ig_nm': igroup_name, 'ig_os': ig_host_os})
        try:
            return self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id)
            result = self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id)
            if self._is_active_sync_configured(self.configuration):
                self.dest_zapi_client.map_lun(path, igroup_name, lun_id=lun_id)
            return result
        except netapp_api.NaApiError as e:
            (_igroup, lun_id) = self._find_mapped_lun_igroup(path,
                                                             initiator_list)
@@ -464,6 +469,8 @@ class NetAppBlockStorageLibrary(

        for _path, _igroup_name in lun_unmap_list:
            self.zapi_client.unmap_lun(_path, _igroup_name)
            if self._is_active_sync_configured(self.configuration):
                self.dest_zapi_client.unmap_lun(_path, _igroup_name)

    def _find_mapped_lun_igroup(self, path, initiator_list):
        """Find an igroup for a LUN mapped to the given initiator(s)."""
@@ -473,6 +480,21 @@ class NetAppBlockStorageLibrary(
        """Checks whether any LUNs are mapped to the given initiator(s)."""
        return self.zapi_client.has_luns_mapped_to_initiators(initiator_list)

    def _is_active_sync_configured(self, config):
        backend_names = []
        replication_devices = config.safe_get('replication_device')
        if replication_devices:
            for replication_device in replication_devices:
                backend_id = replication_device.get('backend_id')
                if backend_id:
                    backend_names.append(backend_id)

        replication_enabled = True if backend_names else False
        if replication_enabled:
            return config.safe_get('netapp_replication_policy') == \
                "AutomatedFailOver"
        return False

    def _get_or_create_igroup(self, initiator_list, initiator_group_type,
                              host_os_type):
        """Checks for an igroup for a set of one or more initiators.
@@ -493,6 +515,19 @@ class NetAppBlockStorageLibrary(
        else:
            igroup_name = self._create_igroup_add_initiators(
                initiator_group_type, host_os_type, initiator_list)
        if self._is_active_sync_configured(self.configuration):
            igroups_dest = self.dest_zapi_client.get_igroup_by_initiators(
                initiator_list)
            for igroup in igroups_dest:
                igroup_name_dest = igroup['initiator-group-name']
                if igroup_name_dest.startswith(na_utils.OPENSTACK_PREFIX):
                    host_os_type = igroup['initiator-group-os-type']
                    initiator_group_type = igroup['initiator-group-type']
                    break
            else:
                self._create_igroup_add_initiators(
                    initiator_group_type, host_os_type, initiator_list)

        return igroup_name, host_os_type, initiator_group_type

    def _create_igroup_add_initiators(self, initiator_group_type,
@@ -501,8 +536,15 @@ class NetAppBlockStorageLibrary(
        igroup_name = na_utils.OPENSTACK_PREFIX + str(uuid.uuid4())
        self.zapi_client.create_igroup(igroup_name, initiator_group_type,
                                       host_os_type)
        if self._is_active_sync_configured(self.configuration):
            self.dest_zapi_client.create_igroup(igroup_name,
                                                initiator_group_type,
                                                host_os_type)
        for initiator in initiator_list:
            self.zapi_client.add_igroup_initiator(igroup_name, initiator)
            if self._is_active_sync_configured(self.configuration):
                self.dest_zapi_client.add_igroup_initiator(igroup_name,
                                                           initiator)
        return igroup_name

    def _delete_lun_from_table(self, name):
@@ -76,8 +76,9 @@ class NetAppBlockStorageCmodeLibrary(
            **kwargs)
        self.configuration.append_config_values(na_opts.netapp_cluster_opts)
        self.driver_mode = 'cluster'
        self.failed_over_backend_name = kwargs.get('active_backend_id')
        self.failed_over = self.failed_over_backend_name is not None
        self.failed_over_backend_name = kwargs.get('active_backend_id').\
            strip() if kwargs.get('active_backend_id') is not None else None
        self.failed_over = bool(self.failed_over_backend_name)
        self.replication_enabled = (
            True if self.get_replication_backend_names(
                self.configuration) else False)
@@ -91,6 +92,18 @@ class NetAppBlockStorageCmodeLibrary(
            self.failed_over_backend_name or self.backend_name)
        self.vserver = self.zapi_client.vserver

        self.dest_zapi_client = None
        if self.replication_enabled:
            if self.get_replication_policy(self.configuration) == \
                    "AutomatedFailOver":
                backend_names = self.get_replication_backend_names(
                    self.configuration)
                for dest_backend_name in backend_names:
                    dest_backend_config = dot_utils.get_backend_configuration(
                        dest_backend_name)
                    dest_vserver = dest_backend_config.netapp_vserver
                    self.dest_zapi_client = dot_utils.get_client_for_backend(
                        dest_backend_name, vserver_name=dest_vserver)
        # Storage service catalog
        self.ssc_library = capabilities.CapabilitiesLibrary(
            self.driver_protocol, self.vserver, self.zapi_client,
@@ -2566,6 +2566,7 @@ class Client(client_base.Client, metaclass=volume_utils.TraceWrapperMetaclass):

    def create_snapmirror(self, source_vserver, source_volume,
                          destination_vserver, destination_volume,
                          src_cg_name=None, dest_cg_name=None,
                          schedule=None, policy=None,
                          relationship_type='data_protection'):
        """Creates a SnapMirror relationship (cDOT 8.2 or later only)."""
@@ -2592,6 +2593,7 @@ class Client(client_base.Client, metaclass=volume_utils.TraceWrapperMetaclass):

    def initialize_snapmirror(self, source_vserver, source_volume,
                              destination_vserver, destination_volume,
                              active_sync_policy=None,
                              source_snapshot=None, transfer_priority=None):
        """Initializes a SnapMirror relationship (cDOT 8.2 or later only)."""
        self._ensure_snapmirror_v2()
@@ -2024,8 +2024,32 @@ class RestClient(object, metaclass=volume_utils.TraceWrapperMetaclass):

        return snapmirrors

    def create_ontap_consistency_group(self, source_vserver, source_volume,
                                       source_cg):
        """Creates an ONTAP consistency group."""

        body = {
            'svm': {
                'name': source_vserver
            },
            'name': source_cg,
            'volumes': [{
                'name': source_volume,
                "provisioning_options": {"action": "add"}
            }]
        }

        try:
            self.send_request('/application/consistency-groups/', 'post',
                              body=body)
        except netapp_api.NaApiError as e:
            if e.code != netapp_api.REST_ERELATION_EXISTS:
                raise e

    def create_snapmirror(self, source_vserver, source_volume,
                          destination_vserver, destination_volume,
                          source_cg=None,
                          destination_cg=None,
                          schedule=None, policy=None,
                          relationship_type='data_protection'):
        """Creates a SnapMirror relationship.
@@ -2037,15 +2061,27 @@ class RestClient(object, metaclass=volume_utils.TraceWrapperMetaclass):
        relationship_type will be ignored because XDP is the only type
        supported through REST API.
        """

        body = {
            'source': {
                'path': source_vserver + ':' + source_volume
            },
            'destination': {
                'path': destination_vserver + ':' + destination_volume
        if source_cg is not None:
            body = {
                'source': {
                    'path': source_vserver + ':/cg/' + source_cg,
                    'consistency_group_volumes': [{'name': source_volume}]
                },
                'destination': {
                    'path': destination_vserver + ':/cg/' + destination_cg,
                    'consistency_group_volumes':
                        [{'name': destination_volume}]
                }
            }
        else:
            body = {
                'source': {
                    'path': source_vserver + ':' + source_volume
                },
                'destination': {
                    'path': destination_vserver + ':' + destination_volume
                }
            }
        }

        if policy:
            body['policy'] = {'name': policy}
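Editorial note: the only difference between the active-sync case and the regular case above is the form of the SnapMirror path. The following standalone sketch mirrors that construction; the vserver, volume, and consistency-group names are hypothetical examples, not values from this change.

# Standalone illustration of the SnapMirror path strings built by
# create_snapmirror() above; all names here are made-up examples.
source_vserver = 'vserver_src'
source_volume = 'volume_a'
source_cg = 'cg_volume_a'      # None when active sync is not in use

if source_cg is not None:
    # Consistency-group path used for active sync (AutomatedFailOver)
    source_path = source_vserver + ':/cg/' + source_cg
else:
    # Plain volume path used for ordinary SnapMirror relationships
    source_path = source_vserver + ':' + source_volume

print(source_path)  # prints: vserver_src:/cg/cg_volume_a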
@@ -2094,6 +2130,7 @@ class RestClient(object, metaclass=volume_utils.TraceWrapperMetaclass):

    def initialize_snapmirror(self, source_vserver, source_volume,
                              destination_vserver, destination_volume,
                              active_sync_policy=False,
                              source_snapshot=None, transfer_priority=None):
        """Initializes a SnapMirror relationship."""

@@ -2101,9 +2138,11 @@ class RestClient(object, metaclass=volume_utils.TraceWrapperMetaclass):
        # This error is raised when using ZAPI with different volume component
        # numbers, but in REST, the job must be checked sometimes before that
        # error occurs.

        state = 'snapmirrored'
        if active_sync_policy:
            state = 'in_sync'
        return self._set_snapmirror_state(
            'snapmirrored', source_vserver, source_volume,
            state, source_vserver, source_volume,
            destination_vserver, destination_volume, wait_result=False)

    def abort_snapmirror(self, source_vserver, source_volume,
@@ -91,9 +91,29 @@ class DataMotionMixin(object):
                replication_aggregate_map.pop('backend_id')
                aggregate_map = replication_aggregate_map
                break

        return aggregate_map

    def get_replication_policy(self, config):
        """Get replication policy for the configured replication targets."""
        return config.safe_get('netapp_replication_policy') or \
            "MirrorAllSnapshots"

    def is_sync_mirror_policy(self, replication_policy):
        return "Sync" in replication_policy or "StrictSync" in \
            replication_policy

    def is_active_sync_asymmetric_policy(self, replication_policy):
        return "AutomatedFailOver" in replication_policy

    def is_active_sync_configured(self, configuration):
        replication_enabled = (
            True if self.get_replication_backend_names(
                configuration) else False)
        if replication_enabled:
            return self.get_replication_policy(configuration) == \
                "AutomatedFailOver"
        return False

    def get_snapmirrors(self, src_backend_name, dest_backend_name,
                        src_flexvol_name=None, dest_flexvol_name=None):
        """Get info regarding SnapMirror relationship/s for given params."""
@@ -123,7 +143,8 @@ class DataMotionMixin(object):
        return snapmirrors

    def create_snapmirror(self, src_backend_name, dest_backend_name,
                          src_flexvol_name, dest_flexvol_name):
                          src_flexvol_name, dest_flexvol_name,
                          replication_policy):
        """Set up a SnapMirror relationship b/w two FlexVols (cinder pools)

        1. Create SnapMirror relationship
@@ -140,10 +161,16 @@ class DataMotionMixin(object):
            src_backend_name)
        src_vserver = source_backend_config.netapp_vserver

        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)
        src_client = config_utils.get_client_for_backend(
            src_backend_name, vserver_name=src_vserver)
        if replication_policy == "AutomatedFailOver":
            dest_client = config_utils.get_client_for_backend(
                dest_backend_name, vserver_name=dest_vserver, force_rest=True)
            src_client = config_utils.get_client_for_backend(
                src_backend_name, vserver_name=src_vserver, force_rest=True)
        else:
            dest_client = config_utils.get_client_for_backend(
                dest_backend_name, vserver_name=dest_vserver)
            src_client = config_utils.get_client_for_backend(
                src_backend_name, vserver_name=src_vserver)

        provisioning_options = (
            src_client.get_provisioning_options_from_flexvol(
@@ -160,9 +187,23 @@ class DataMotionMixin(object):
                dest_flexvol_name,
                pool_is_flexgroup=pool_is_flexgroup)

        sync_mirror_policy = self.is_sync_mirror_policy(replication_policy)
        active_sync_asymmetric_policy = self.is_active_sync_asymmetric_policy(
            replication_policy)
        src_cg = "cg_" + src_flexvol_name if active_sync_asymmetric_policy \
            else ""
        dest_cg = "cg_" + dest_flexvol_name if active_sync_asymmetric_policy \
            else ""
        src_cg_path = "/cg/" + str(src_cg)
        dest_cg_path = "/cg/" + str(dest_cg)

        # 2. Check if SnapMirror relationship exists
        existing_mirrors = dest_client.get_snapmirrors(
            src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name)
        if active_sync_asymmetric_policy:
            existing_mirrors = dest_client.get_snapmirrors(
                src_vserver, src_cg_path, dest_vserver, dest_cg_path)
        else:
            existing_mirrors = dest_client.get_snapmirrors(
                src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name)

        msg_payload = {
            'src_vserver': src_vserver,
@@ -173,34 +214,45 @@ class DataMotionMixin(object):

        # 3. Create and initialize SnapMirror if it doesn't already exist
        if not existing_mirrors:

            # TODO(gouthamr): Change the schedule from hourly to a config value
            # TODO(gouthamr): Change the schedule from hourly to config value
            msg = ("Creating a SnapMirror relationship between "
                   "%(src_vserver)s:%(src_volume)s and %(dest_vserver)s:"
                   "%(src_vserver)s:%(src_flexvol_name)s and %(dest_vserver)s:"
                   "%(dest_volume)s.")
            LOG.debug(msg, msg_payload)

            try:
                if active_sync_asymmetric_policy:
                    src_client.create_ontap_consistency_group(
                        src_vserver, src_flexvol_name, src_cg)

                dest_client.create_snapmirror(
                    src_vserver,
                    src_flexvol_name,
                    dest_vserver,
                    dest_flexvol_name,
                    schedule='hourly',
                    relationship_type=('extended_data_protection'
                                       if pool_is_flexgroup
                                       else 'data_protection'))

                msg = ("Initializing SnapMirror transfers between "
                       "%(src_vserver)s:%(src_volume)s and %(dest_vserver)s:"
                       "%(dest_volume)s.")
                LOG.debug(msg, msg_payload)
                    src_cg,
                    dest_cg,
                    schedule=None
                    if sync_mirror_policy or active_sync_asymmetric_policy
                    else 'hourly',
                    policy=replication_policy,
                    relationship_type=(
                        'extended_data_protection'
                        if pool_is_flexgroup or sync_mirror_policy
                        else 'data_protection'))

                # Initialize async transfer of the initial data
                dest_client.initialize_snapmirror(src_vserver,
                                                  src_flexvol_name,
                                                  dest_vserver,
                                                  dest_flexvol_name)
                if active_sync_asymmetric_policy:
                    src_flexvol_name = src_cg_path
                    dest_flexvol_name = dest_cg_path
                if not sync_mirror_policy:
                    msg = ("Initializing SnapMirror transfers between "
                           "%(src_vserver)s:%(src_volume)s and "
                           "%(dest_vserver)s:%(dest_volume)s.")
                    LOG.debug(msg, msg_payload)
                    dest_client.initialize_snapmirror(
                        src_vserver, src_flexvol_name, dest_vserver,
                        dest_flexvol_name, active_sync_asymmetric_policy)
            except netapp_api.NaApiError as e:
                with excutils.save_and_reraise_exception() as raise_ctxt:
                    if (e.code == netapp_api.EAPIERROR and
@@ -219,7 +271,11 @@ class DataMotionMixin(object):
        # 4. Try to repair SnapMirror if existing
        else:
            snapmirror = existing_mirrors[0]
            if snapmirror.get('mirror-state') != 'snapmirrored':
            if active_sync_asymmetric_policy:
                src_flexvol_name = src_cg_path
                dest_flexvol_name = dest_cg_path
            if snapmirror.get('mirror-state') != 'snapmirrored' and \
                    snapmirror.get('mirror-state') != 'in_sync':
                try:
                    msg = ("SnapMirror between %(src_vserver)s:%(src_volume)s "
                           "and %(dest_vserver)s:%(dest_volume)s is in "
@@ -556,6 +612,7 @@ class DataMotionMixin(object):
    def ensure_snapmirrors(self, config, src_backend_name, src_flexvol_names):
        """Ensure all the SnapMirrors needed for whole-backend replication."""
        backend_names = self.get_replication_backend_names(config)
        replication_policy = self.get_replication_policy(config)
        for dest_backend_name in backend_names:
            for src_flexvol_name in src_flexvol_names:

@@ -571,7 +628,8 @@ class DataMotionMixin(object):
                    self.create_snapmirror(src_backend_name,
                                           dest_backend_name,
                                           src_flexvol_name,
                                           dest_flexvol_name)
                                           dest_flexvol_name,
                                           replication_policy)
                try:
                    _try_create_snapmirror()
                except na_utils.NetAppDriverException as e:
@@ -753,39 +811,63 @@ class DataMotionMixin(object):
            msg = _("Cannot failover to the same host as the primary.")
            raise exception.InvalidReplicationTarget(reason=msg)

        replication_targets = self.get_replication_backend_names(
            self.configuration)
        # Added logic to handle failback from the secondary to old primary
        # This condition is needed when the DR/replication conditions are
        # restored back to normal state
        if secondary_id == "default":
            LOG.debug('Fails back to primary')
            volume_updates = []
            volume_update = []
            # Update the ZAPI client to the backend we failed over to
            active_backend_name = self.backend_name
            self._update_zapi_client(active_backend_name)
            self.failed_over = False
            self.failed_over_backend_name = active_backend_name
            for volume in volumes:
                volume_update = []
                replication_status = fields.ReplicationStatus.ENABLED
                volume_update = {
                    'volume_id': volume['id'],
                    'updates': {'replication_status': replication_status},
                }
                volume_updates.append(volume_update)
            return active_backend_name, volume_updates, []

        if not replication_targets:
            msg = _("No replication targets configured for backend "
                    "%s. Cannot failover.")
            raise exception.InvalidReplicationTarget(reason=msg % self.host)
        elif secondary_id and secondary_id not in replication_targets:
            msg = _("%(target)s is not among replication targets configured "
                    "for back end %(host)s. Cannot failover.")
            payload = {
                'target': secondary_id,
                'host': self.host,
            }
            raise exception.InvalidReplicationTarget(reason=msg % payload)
        else:
            replication_targets = self.get_replication_backend_names(
                self.configuration)

        flexvols = self.ssc_library.get_ssc_flexvol_names()
            if not replication_targets:
                msg = _("No replication targets configured for backend "
                        "%s. Cannot failover.")
                raise exception.InvalidReplicationTarget(
                    reason=msg % self.host)
            if secondary_id and secondary_id not in replication_targets:
                msg = _("%(target)s is not among replication targets "
                        "configured for back end %(host)s. Cannot failover.")
                payload = {
                    'target': secondary_id,
                    'host': self.host,
                }
                raise exception.InvalidReplicationTarget(reason=msg % payload)

        try:
            active_backend_name, volume_updates = self._complete_failover(
                self.backend_name, replication_targets, flexvols, volumes,
                failover_target=secondary_id)
        except na_utils.NetAppDriverException as e:
            msg = _("Could not complete failover: %s") % e
            raise exception.UnableToFailOver(reason=msg)
            flexvols = self.ssc_library.get_ssc_flexvol_names()

        # Update the ZAPI client to the backend we failed over to
        self._update_zapi_client(active_backend_name)
            try:
                active_backend_name, volume_updates = self._complete_failover(
                    self.backend_name, replication_targets, flexvols, volumes,
                    failover_target=secondary_id)
            except na_utils.NetAppDriverException as e:
                msg = _("Could not complete failover: %s") % e
                raise exception.UnableToFailOver(reason=msg)

        self.failed_over = True
        self.failed_over_backend_name = active_backend_name
            # Update the ZAPI client to the backend we failed over to
            self._update_zapi_client(active_backend_name)

        return active_backend_name, volume_updates, []
            self.failed_over = True
            self.failed_over_backend_name = active_backend_name

            return active_backend_name, volume_updates, []

    def _failover(self, context, volumes, secondary_id=None, groups=None):
        """Failover to replication target."""
@@ -793,33 +875,55 @@ class DataMotionMixin(object):
            msg = _("Cannot failover to the same host as the primary.")
            raise exception.InvalidReplicationTarget(reason=msg)

        replication_targets = self.get_replication_backend_names(
            self.configuration)
        # Added logic to handle failback from the secondary to old primary
        # This condition is needed when the DR/replication conditions are
        # restored back to normal state
        if secondary_id == "default":
            LOG.debug('Fails back to primary inside _failover')
            volume_updates = []
            volume_update = []
            # Update the ZAPI client to the backend we failed over to
            active_backend_name = self.backend_name
            self._update_zapi_client(active_backend_name)
            self.failed_over = False
            self.failed_over_backend_name = active_backend_name
            for volume in volumes:
                replication_status = fields.ReplicationStatus.ENABLED
                volume_update = {
                    'volume_id': volume['id'],
                    'updates': {'replication_status': replication_status},
                }
                volume_updates.append(volume_update)
            return active_backend_name, volume_updates, []
        else:
            replication_targets = self.get_replication_backend_names(
                self.configuration)

        if not replication_targets:
            msg = _("No replication targets configured for backend "
                    "%s. Cannot failover.")
            raise exception.InvalidReplicationTarget(reason=msg % self.host)
        elif secondary_id and secondary_id not in replication_targets:
            msg = _("%(target)s is not among replication targets configured "
                    "for back end %(host)s. Cannot failover.")
            payload = {
                'target': secondary_id,
                'host': self.host,
            }
            raise exception.InvalidReplicationTarget(reason=msg % payload)
            if not replication_targets:
                msg = _("No replication targets configured for backend "
                        "%s. Cannot failover.")
                raise exception.InvalidReplicationTarget(
                    reason=msg % self.host)
            if secondary_id and secondary_id not in replication_targets:
                msg = _("%(target)s is not among replication targets "
                        "configured for back end %(host)s. Cannot failover.")
                payload = {
                    'target': secondary_id,
                    'host': self.host,
                }
                raise exception.InvalidReplicationTarget(reason=msg % payload)

        flexvols = self.ssc_library.get_ssc_flexvol_names()
            flexvols = self.ssc_library.get_ssc_flexvol_names()

        try:
            active_backend_name, volume_updates = self._complete_failover(
                self.backend_name, replication_targets, flexvols, volumes,
                failover_target=secondary_id)
        except na_utils.NetAppDriverException as e:
            msg = _("Could not complete failover: %s") % e
            raise exception.UnableToFailOver(reason=msg)
            try:
                active_backend_name, volume_updates = self._complete_failover(
                    self.backend_name, replication_targets, flexvols, volumes,
                    failover_target=secondary_id)
            except na_utils.NetAppDriverException as e:
                msg = _("Could not complete failover: %s") % e
                raise exception.UnableToFailOver(reason=msg)

        return active_backend_name, volume_updates, []
            return active_backend_name, volume_updates, []

    def _failover_completed(self, context, secondary_id=None):
        """Update volume node when `failover` is completed."""
@@ -213,7 +213,15 @@ netapp_replication_opts = [
               min=60,
               default=360,  # Default to six minutes
               help='Sets time in seconds to wait for a replication volume '
                    'create to complete and go online.')]
                    'create to complete and go online.'),
    cfg.StrOpt('netapp_replication_policy',
               default='MirrorAllSnapshots',
               help='This option defines the replication policy to be used '
                    'while creating the SnapMirror relationship. The default '
                    'is MirrorAllSnapshots, which is based on async-mirror. '
                    'Users can pass values like Sync or StrictSync for a '
                    'synchronous SnapMirror relationship (SM-S) to achieve '
                    'zero RPO.')]

netapp_support_opts = [
    cfg.StrOpt('netapp_api_trace_pattern',
@@ -0,0 +1,8 @@
---
features:
  - |
    Synchronous mirror support for NetApp backends. Provides an option,
    netapp_replication_policy, for the replication of NetApp backends so
    that the user can apply synchronous mirror and other relevant
    SnapMirror policies. Code has also been added to fail back from the
    secondary to the primary via the "default" option.
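Editorial note: a minimal cinder.conf sketch of how the new option fits alongside the existing replication settings; the backend, driver section, and vserver names below are placeholders, not values from this change.

[netapp_backend_primary]
volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
netapp_storage_family = ontap_cluster
netapp_storage_protocol = iscsi
netapp_vserver = vserver_primary
replication_device = backend_id:netapp_backend_secondary
netapp_replication_policy = AutomatedFailOver

With this in place, a later failback to the primary is requested through the reserved "default" backend id mentioned in the note above.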