Merge "Dell SC: Added support for failover_host failback"

commit 82c352649b
Changed paths: cinder/tests/unit, cinder/volume/drivers/dell, releasenotes/notes

@@ -27,12 +27,15 @@ from cinder.volume import volume_types
+# We patch these here as they are used by every test to keep
+# from trying to contact a Dell Storage Center.
+MOCKAPI = mock.MagicMock()


@mock.patch.object(dell_storagecenter_api.HttpClient,
                   '__init__',
                   return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                   'open_connection',
-                  return_value=mock.MagicMock())
+                  return_value=MOCKAPI)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                   'close_connection')
class DellSCSanISCSIDriverTestCase(test.TestCase):
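
# Aside (editor's illustrative sketch, not part of this commit): the stacked
# mock.patch.object decorators above apply bottom-up, so each test method
# receives the generated mocks in reverse order of declaration, e.g.:
#
#     def test_something(self, mock_close_connection,  # 'close_connection'
#                        mock_open_connection,         # 'open_connection'
#                        mock_init):                   # '__init__' patch
#         ...
#
# Patching 'open_connection' with the shared MOCKAPI (rather than a fresh
# MagicMock per test) lets individual tests configure API behavior on one
# module-level object.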
@@ -269,10 +272,6 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
            'provider_location': "%s:3260,1 %s 0"
                                 % (self.driver.configuration.dell_sc_iscsi_ip,
                                    self.fake_iqn)
-            # ,
-            # 'provider_auth': 'CHAP %s %s' % (
-            #     self.configuration.eqlx_chap_login,
-            #     self.configuration.eqlx_chap_password)
        }

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,

@@ -2048,7 +2047,7 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):

        res = self.driver.retype(
            None, {'id': fake.VOLUME_ID}, None,
-            {'extra_specs': {'replication_enabled': [False, True]}},
+            {'extra_specs': {'replication_enabled': [None, '<is> True']}},
            None)
        self.assertTrue(mock_create_replications.called)
        self.assertFalse(mock_delete_replications.called)

@@ -2056,7 +2055,7 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                          'replication_driver_data': '54321'}, res)
        res = self.driver.retype(
            None, {'id': fake.VOLUME_ID}, None,
-            {'extra_specs': {'replication_enabled': [True, False]}},
+            {'extra_specs': {'replication_enabled': ['<is> True', None]}},
            None)
        self.assertTrue(mock_delete_replications.called)
        self.assertEqual({'replication_status': 'disabled',
@@ -2210,7 +2209,10 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                       'find_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'remove_mappings')
+    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
+                       'failback_volumes')
    def test_failover_host(self,
+                           mock_failback_volumes,
                           mock_remove_mappings,
                           mock_find_volume,
                           mock_parse_secondary,

@@ -2258,6 +2260,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                    'provider_id': '2.1'}},
                                   {'volume_id': fake.VOLUME2_ID, 'updates':
                                    {'status': 'error'}}]
+        self.driver.failed_over = False
+        self.driver.active_backend_id = None
        destssn, volume_update = self.driver.failover_host(
            {}, volumes, '12345')
        self.assertEqual(expected_destssn, destssn)

@@ -2270,6 +2274,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                    'provider_id': '2.1'}},
                                   {'volume_id': fake.VOLUME2_ID, 'updates':
                                    {'status': 'error'}}]
+        self.driver.failed_over = False
+        self.driver.active_backend_id = None
        destssn, volume_update = self.driver.failover_host(
            {}, volumes, '12345')
        self.assertEqual(expected_destssn, destssn)

@@ -2281,12 +2287,16 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                    {'status': 'error'}},
                                   {'volume_id': fake.VOLUME2_ID, 'updates':
                                    {'status': 'error'}}]
+        self.driver.failed_over = False
+        self.driver.active_backend_id = None
        destssn, volume_update = self.driver.failover_host(
            {}, volumes, '12345')
        self.assertEqual(expected_destssn, destssn)
        self.assertEqual(expected_volume_update, volume_update)
        # Secondary not found.
        mock_parse_secondary.return_value = None
+        self.driver.failed_over = False
+        self.driver.active_backend_id = None
        self.assertRaises(exception.InvalidInput,
                          self.driver.failover_host,
                          {},

@@ -2294,11 +2304,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                          '54321')
        # Already failed over.
        self.driver.failed_over = True
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.failover_host,
                          {},
                          volumes,
                          '12345')
        self.driver.failover_host({}, volumes, 'default')
        mock_failback_volumes.assert_called_once_with(volumes)
        self.driver.replication_enabled = False
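
# Aside (editor's illustrative sketch, not part of this commit): with this
# change, failover_host() gains a failback path. Passing the reserved backend
# id 'default' while failed over routes into failback_volumes() instead of
# raising, e.g.:
#
#     driver.failover_host(ctxt, volumes, '12345')    # fail over to SSN 12345
#     driver.failover_host(ctxt, volumes, 'default')  # fail back to primary
#
# 'ctxt' here is whatever request context the caller holds; the tests above
# simply pass {}.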

    def test__get_unmanaged_replay(self,

@@ -2420,3 +2427,780 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
        mock_find_replay.return_value = screplay
        self.driver.unmanage_snapshot(snapshot)
        mock_unmanage_replay.assert_called_once_with(screplay)

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_get_qos',
                       return_value='cinderqos')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_parse_extraspecs',
                       return_value={'replay_profile_string': 'pro'})
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_repl_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_replication')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'replicate_to_common')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'remove_mappings')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_wait_for_replication')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_reattach_remaining_replications')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_fixup_types')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_volume_updates',
                       return_value=[])
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_update_backend')
    def test_failback_volumes(self,
                              mock_update_backend,
                              mock_volume_updates,
                              mock_fixup_types,
                              mock_reattach_remaining_replications,
                              mock_wait_for_replication,
                              mock_remove_mappings,
                              mock_replicate_to_common,
                              mock_delete_replication,
                              mock_find_repl_volume,
                              mock_find_volume,
                              mock_parse_extraspecs,
                              mock_get_qos,
                              mock_close_connection,
                              mock_open_connection,
                              mock_init):
        self.driver.replication_enabled = True
        self.driver.failed_over = True
        self.driver.active_backend_id = 12345
        self.driver.primaryssn = 11111
        backends = self.driver.backends
        self.driver.backends = [{'target_device_id': '12345',
                                 'qosnode': 'cinderqos'},
                                {'target_device_id': '67890',
                                 'qosnode': 'cinderqos'}]
        volumes = [{'id': fake.VOLUME_ID,
                    'replication_driver_data': '12345',
                    'provider_id': '12345.1'},
                   {'id': fake.VOLUME2_ID,
                    'replication_driver_data': '12345',
                    'provider_id': '12345.2'}]
        mock_find_volume.side_effect = [{'instanceId': '12345.1'},
                                        {'instanceId': '12345.2'}]
        mock_find_repl_volume.side_effect = [{'instanceId': '11111.1'},
                                             {'instanceId': '11111.2'}]
        mock_replicate_to_common.side_effect = [{'instanceId': '12345.100',
                                                 'destinationVolume':
                                                     {'instanceId': '11111.3'}
                                                 },
                                                {'instanceId': '12345.200',
                                                 'destinationVolume':
                                                     {'instanceId': '11111.4'}
                                                 }]
        # We don't care about the return. We just want to make sure that
        # _wait_for_replication is called with the proper replitems.
        self.driver.failback_volumes(volumes)
        expected = [{'volume': volumes[0],
                     'specs': {'replay_profile_string': 'pro'},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.100',
                     'cvol': '12345.1',
                     'ovol': '11111.1',
                     'nvol': '11111.3',
                     'rdd': '12345',
                     'status': 'inprogress'},
                    {'volume': volumes[1],
                     'specs': {'replay_profile_string': 'pro'},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.200',
                     'cvol': '12345.2',
                     'ovol': '11111.2',
                     'nvol': '11111.4',
                     'rdd': '12345',
                     'status': 'inprogress'}
                    ]
        # We are stubbing everything out, so we just want to be sure this hits
        # _volume_updates as expected. (Ordinarily the items would be modified
        # by the time they hit this, but since they aren't we use that to our
        # advantage and check that our replitems were set correctly coming out
        # of the main loop.)
        mock_volume_updates.assert_called_once_with(expected)

        self.driver.replication_enabled = False
        self.driver.failed_over = False
        self.driver.backends = backends

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_get_qos',
                       return_value='cinderqos')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_parse_extraspecs',
                       return_value={'replay_profile_string': 'pro'})
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_repl_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_replication')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'replicate_to_common')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'remove_mappings')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_wait_for_replication')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_reattach_remaining_replications')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_fixup_types')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_volume_updates',
                       return_value=[])
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_update_backend')
    def test_failback_volumes_with_some_not_replicated(
            self,
            mock_update_backend,
            mock_volume_updates,
            mock_fixup_types,
            mock_reattach_remaining_replications,
            mock_wait_for_replication,
            mock_remove_mappings,
            mock_replicate_to_common,
            mock_delete_replication,
            mock_find_repl_volume,
            mock_find_volume,
            mock_parse_extraspecs,
            mock_get_qos,
            mock_close_connection,
            mock_open_connection,
            mock_init):
        self.driver.replication_enabled = True
        self.driver.failed_over = True
        self.driver.active_backend_id = 12345
        self.driver.primaryssn = 11111
        backends = self.driver.backends
        self.driver.backends = [{'target_device_id': '12345',
                                 'qosnode': 'cinderqos'},
                                {'target_device_id': '67890',
                                 'qosnode': 'cinderqos'}]
        volumes = [{'id': fake.VOLUME_ID,
                    'replication_driver_data': '12345',
                    'provider_id': '12345.1'},
                   {'id': fake.VOLUME2_ID,
                    'replication_driver_data': '12345',
                    'provider_id': '12345.2'},
                   {'id': fake.VOLUME3_ID, 'provider_id': '11111.10'}]
        mock_find_volume.side_effect = [{'instanceId': '12345.1'},
                                        {'instanceId': '12345.2'}]
        mock_find_repl_volume.side_effect = [{'instanceId': '11111.1'},
                                             {'instanceId': '11111.2'}]
        mock_replicate_to_common.side_effect = [{'instanceId': '12345.100',
                                                 'destinationVolume':
                                                     {'instanceId': '11111.3'}
                                                 },
                                                {'instanceId': '12345.200',
                                                 'destinationVolume':
                                                     {'instanceId': '11111.4'}
                                                 }]
        expected = [{'volume': volumes[0],
                     'specs': {'replay_profile_string': 'pro'},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.100',
                     'cvol': '12345.1',
                     'ovol': '11111.1',
                     'nvol': '11111.3',
                     'rdd': '12345',
                     'status': 'inprogress'},
                    {'volume': volumes[1],
                     'specs': {'replay_profile_string': 'pro'},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.200',
                     'cvol': '12345.2',
                     'ovol': '11111.2',
                     'nvol': '11111.4',
                     'rdd': '12345',
                     'status': 'inprogress'}
                    ]
        ret = self.driver.failback_volumes(volumes)
        mock_volume_updates.assert_called_once_with(expected)

        # Make sure ret is right. In this case just the unreplicated volume
        # as our volume updates elsewhere return nothing.
        expected_updates = [{'volume_id': fake.VOLUME3_ID,
                             'updates': {'status': 'available'}}]
        self.assertEqual(expected_updates, ret)
        self.driver.replication_enabled = False
        self.driver.failed_over = False
        self.driver.backends = backends

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_get_qos',
                       return_value='cinderqos')
    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_update_backend')
    def test_failback_volumes_with_none_replicated(
            self,
            mock_update_backend,
            mock_get_qos,
            mock_close_connection,
            mock_open_connection,
            mock_init):
        self.driver.replication_enabled = True
        self.driver.failed_over = True
        self.driver.active_backend_id = 12345
        self.driver.primaryssn = 11111
        backends = self.driver.backends
        self.driver.backends = [{'target_device_id': '12345',
                                 'qosnode': 'cinderqos'},
                                {'target_device_id': '67890',
                                 'qosnode': 'cinderqos'}]
        volumes = [{'id': fake.VOLUME_ID,
                    'provider_id': '11111.1'},
                   {'id': fake.VOLUME2_ID, 'provider_id': '11111.2'},
                   {'id': fake.VOLUME3_ID, 'provider_id': '11111.10'}]

        ret = self.driver.failback_volumes(volumes)

        # Make sure ret is right. In this case every volume is unreplicated,
        # as our volume updates elsewhere return nothing.
        expected_updates = [{'volume_id': fake.VOLUME_ID,
                             'updates': {'status': 'available'}},
                            {'volume_id': fake.VOLUME2_ID,
                             'updates': {'status': 'available'}},
                            {'volume_id': fake.VOLUME3_ID,
                             'updates': {'status': 'available'}}]
        self.assertEqual(expected_updates, ret)
        self.driver.replication_enabled = False
        self.driver.failed_over = False
        self.driver.backends = backends

    def test_volume_updates(self,
                            mock_close_connection,
                            mock_open_connection,
                            mock_init):
        items = [{'volume': {'id': fake.VOLUME_ID},
                  'specs': {'replay_profile_string': 'pro'},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.100',
                  'cvol': '12345.1',
                  'ovol': '11111.1',
                  'nvol': '11111.3',
                  'rdd': '12345,67890',
                  'status': 'available'},
                 {'volume': {'id': fake.VOLUME2_ID},
                  'specs': {'replay_profile_string': 'pro'},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.200',
                  'cvol': '12345.2',
                  'ovol': '11111.2',
                  'nvol': '11111.4',
                  'rdd': '12345,67890',
                  'status': 'available'}
                 ]
        ret = self.driver._volume_updates(items)
        expected = [{'volume_id': fake.VOLUME_ID,
                     'updates': {'status': 'available',
                                 'replication_status': 'enabled',
                                 'provider_id': '11111.3',
                                 'replication_driver_data': '12345,67890'}},
                    {'volume_id': fake.VOLUME2_ID,
                     'updates': {'status': 'available',
                                 'replication_status': 'enabled',
                                 'provider_id': '11111.4',
                                 'replication_driver_data': '12345,67890'}}
                    ]
        self.assertEqual(expected, ret)
        items.append({'volume': {'id': fake.VOLUME3_ID},
                      'specs': {'replay_profile_string': 'pro'},
                      'qosnode': 'cinderqos',
                      'screpl': '12345.300',
                      'cvol': '12345.5',
                      'ovol': '11111.5',
                      'nvol': '11111.6',
                      'rdd': '12345',
                      'status': 'error'})

        ret = self.driver._volume_updates(items)
        expected.append({'volume_id': fake.VOLUME3_ID,
                         'updates': {'status': 'error',
                                     'replication_status': 'error',
                                     'provider_id': '11111.6',
                                     'replication_driver_data': '12345'}})
        self.assertEqual(expected, ret)
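
    # Aside (editor's illustrative sketch, not part of this commit): the
    # behavior exercised above suggests _volume_updates maps each replitem to
    # a Cinder volume update, roughly:
    #
    #     def _volume_updates_sketch(items):
    #         updates = []
    #         for item in items:
    #             ok = item['status'] == 'available'
    #             updates.append(
    #                 {'volume_id': item['volume']['id'],
    #                  'updates': {'status': item['status'],
    #                              'replication_status':
    #                                  'enabled' if ok else 'error',
    #                              'provider_id': item['nvol'],
    #                              'replication_driver_data': item['rdd']}})
    #         return updates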

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_volume',
                       return_value=VOLUME)
    def test_fixup_types(self,
                         mock_get_volume,
                         mock_close_connection,
                         mock_open_connection,
                         mock_init):
        items = [{'volume': {'id': fake.VOLUME_ID},
                  'specs': {'replay_profile_string': 'pro'},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.100',
                  'cvol': '12345.1',
                  'ovol': '11111.1',
                  'nvol': '11111.3',
                  'rdd': '12345,67890',
                  'status': 'reattached'},
                 {'volume': {'id': fake.VOLUME2_ID},
                  'specs': {'replay_profile_string': 'pro'},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.200',
                  'cvol': '12345.2',
                  'ovol': '11111.2',
                  'nvol': '11111.4',
                  'rdd': '12345,67890',
                  'status': 'reattached'}
                 ]
        mock_api = mock.Mock()
        mock_api.update_replay_profiles.return_value = True
        self.driver._fixup_types(mock_api, items)
        expected = [{'volume': {'id': fake.VOLUME_ID},
                     'specs': {'replay_profile_string': 'pro'},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.100',
                     'cvol': '12345.1',
                     'ovol': '11111.1',
                     'nvol': '11111.3',
                     'rdd': '12345,67890',
                     'status': 'available'},
                    {'volume': {'id': fake.VOLUME2_ID},
                     'specs': {'replay_profile_string': 'pro'},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.200',
                     'cvol': '12345.2',
                     'ovol': '11111.2',
                     'nvol': '11111.4',
                     'rdd': '12345,67890',
                     'status': 'available'}]
        self.assertEqual(expected, items)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_volume',
                       return_value=VOLUME)
    def test_fixup_types_with_error(self,
                                    mock_get_volume,
                                    mock_close_connection,
                                    mock_open_connection,
                                    mock_init):
        items = [{'volume': {'id': fake.VOLUME_ID},
                  'specs': {'replay_profile_string': 'pro'},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.100',
                  'cvol': '12345.1',
                  'ovol': '11111.1',
                  'nvol': '11111.3',
                  'rdd': '12345,67890',
                  'status': 'reattached'},
                 {'volume': {'id': fake.VOLUME2_ID},
                  'specs': {'replay_profile_string': 'pro'},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.200',
                  'cvol': '12345.2',
                  'ovol': '11111.2',
                  'nvol': '11111.4',
                  'rdd': '12345,67890',
                  'status': 'reattached'}
                 ]
        # One good one fail.
        mock_api = mock.Mock()
        mock_api.update_replay_profiles.side_effect = [True, False]
        self.driver._fixup_types(mock_api, items)
        expected = [{'volume': {'id': fake.VOLUME_ID},
                     'specs': {'replay_profile_string': 'pro'},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.100',
                     'cvol': '12345.1',
                     'ovol': '11111.1',
                     'nvol': '11111.3',
                     'rdd': '12345,67890',
                     'status': 'available'},
                    {'volume': {'id': fake.VOLUME2_ID},
                     'specs': {'replay_profile_string': 'pro'},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.200',
                     'cvol': '12345.2',
                     'ovol': '11111.2',
                     'nvol': '11111.4',
                     'rdd': '12345,67890',
                     'status': 'error'}]
        self.assertEqual(expected, items)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_volume',
                       return_value=VOLUME)
    def test_fixup_types_with_previous_error(self,
                                             mock_get_volume,
                                             mock_close_connection,
                                             mock_open_connection,
                                             mock_init):
        items = [{'volume': {'id': fake.VOLUME_ID},
                  'specs': {'replay_profile_string': 'pro'},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.100',
                  'cvol': '12345.1',
                  'ovol': '11111.1',
                  'nvol': '11111.3',
                  'rdd': '12345,67890',
                  'status': 'reattached'},
                 {'volume': {'id': fake.VOLUME2_ID},
                  'specs': {'replay_profile_string': 'pro'},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.200',
                  'cvol': '12345.2',
                  'ovol': '11111.2',
                  'nvol': '11111.4',
                  'rdd': '12345,67890',
                  'status': 'error'}
                 ]
        mock_api = mock.Mock()
        mock_api.update_replay_profiles.return_value = True
        self.driver._fixup_types(mock_api, items)
        expected = [{'volume': {'id': fake.VOLUME_ID},
                     'specs': {'replay_profile_string': 'pro'},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.100',
                     'cvol': '12345.1',
                     'ovol': '11111.1',
                     'nvol': '11111.3',
                     'rdd': '12345,67890',
                     'status': 'available'},
                    {'volume': {'id': fake.VOLUME2_ID},
                     'specs': {'replay_profile_string': 'pro'},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.200',
                     'cvol': '12345.2',
                     'ovol': '11111.2',
                     'nvol': '11111.4',
                     'rdd': '12345,67890',
                     'status': 'error'}]
        self.assertEqual(expected, items)

    def test_reattach_remaining_replications(self,
                                             mock_close_connection,
                                             mock_open_connection,
                                             mock_init):
        self.driver.replication_enabled = True
        self.driver.failed_over = True
        self.driver.active_backend_id = 12345
        self.driver.primaryssn = 11111
        backends = self.driver.backends
        self.driver.backends = [{'target_device_id': '12345',
                                 'qosnode': 'cinderqos'},
                                {'target_device_id': '67890',
                                 'qosnode': 'cinderqos'}]
        items = [{'volume': {'id': fake.VOLUME_ID},
                  'specs': {'replicationtype': 'Synchronous',
                            'activereplay': False},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.100',
                  'cvol': '12345.1',
                  'ovol': '11111.1',
                  'nvol': '11111.3',
                  'rdd': '12345',
                  'status': 'synced'},
                 {'volume': {'id': fake.VOLUME2_ID},
                  'specs': {'replicationtype': 'Asynchronous',
                            'activereplay': True},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.200',
                  'cvol': '12345.2',
                  'ovol': '11111.2',
                  'nvol': '11111.4',
                  'rdd': '12345',
                  'status': 'synced'}
                 ]
        mock_api = mock.Mock()
        mock_api.ssn = self.driver.active_backend_id
        mock_api.get_volume.return_value = self.VOLUME
        mock_api.find_repl_volume.return_value = self.VOLUME
        mock_api.start_replication.side_effect = [{'instanceId': '11111.1001'},
                                                  {'instanceId': '11111.1002'},
                                                  None,
                                                  {'instanceId': '11111.1001'}]
        self.driver._reattach_remaining_replications(mock_api, items)

        expected = [{'volume': {'id': fake.VOLUME_ID},
                     'specs': {'replicationtype': 'Synchronous',
                               'activereplay': False},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.100',
                     'cvol': '12345.1',
                     'ovol': '11111.1',
                     'nvol': '11111.3',
                     'rdd': '12345,67890',
                     'status': 'reattached'},
                    {'volume': {'id': fake.VOLUME2_ID},
                     'specs': {'replicationtype': 'Asynchronous',
                               'activereplay': True},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.200',
                     'cvol': '12345.2',
                     'ovol': '11111.2',
                     'nvol': '11111.4',
                     'rdd': '12345,67890',
                     'status': 'reattached'}]
        self.assertEqual(expected, items)
        mock_api.start_replication.assert_any_call(self.VOLUME, self.VOLUME,
                                                   'Synchronous', 'cinderqos',
                                                   False)

        mock_api.start_replication.assert_any_call(self.VOLUME, self.VOLUME,
                                                   'Asynchronous', 'cinderqos',
                                                   True)
        items = [{'volume': {'id': fake.VOLUME_ID},
                  'specs': {'replicationtype': 'Synchronous',
                            'activereplay': False},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.100',
                  'cvol': '12345.1',
                  'ovol': '11111.1',
                  'nvol': '11111.3',
                  'rdd': '12345',
                  'status': 'synced'},
                 {'volume': {'id': fake.VOLUME2_ID},
                  'specs': {'replicationtype': 'Asynchronous',
                            'activereplay': True},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.200',
                  'cvol': '12345.2',
                  'ovol': '11111.2',
                  'nvol': '11111.4',
                  'rdd': '12345',
                  'status': 'synced'}
                 ]
        self.driver._reattach_remaining_replications(mock_api, items)

        expected = [{'volume': {'id': fake.VOLUME_ID},
                     'specs': {'replicationtype': 'Synchronous',
                               'activereplay': False},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.100',
                     'cvol': '12345.1',
                     'ovol': '11111.1',
                     'nvol': '11111.3',
                     'rdd': '12345',
                     'status': 'error'},
                    {'volume': {'id': fake.VOLUME2_ID},
                     'specs': {'replicationtype': 'Asynchronous',
                               'activereplay': True},
                     'qosnode': 'cinderqos',
                     'screpl': '12345.200',
                     'cvol': '12345.2',
                     'ovol': '11111.2',
                     'nvol': '11111.4',
                     'rdd': '12345,67890',
                     'status': 'reattached'}]
        self.assertEqual(expected, items)
        mock_api.start_replication.assert_any_call(self.VOLUME, self.VOLUME,
                                                   'Synchronous', 'cinderqos',
                                                   False)

        mock_api.start_replication.assert_any_call(self.VOLUME, self.VOLUME,
                                                   'Asynchronous', 'cinderqos',
                                                   True)

        self.driver.backends = backends

    def _setup_items(self):
        self.driver.replication_enabled = True
        self.driver.failed_over = True
        self.driver.active_backend_id = 12345
        self.driver.primaryssn = 11111
        backends = self.driver.backends
        self.driver.backends = [{'target_device_id': '12345',
                                 'qosnode': 'cinderqos'},
                                {'target_device_id': '67890',
                                 'qosnode': 'cinderqos'}]
        volumes = [{'id': fake.VOLUME_ID,
                    'replication_driver_data': '12345',
                    'provider_id': '12345.1'},
                   {'id': fake.VOLUME2_ID,
                    'replication_driver_data': '12345',
                    'provider_id': '12345.2'}]

        items = [{'volume': volumes[0],
                  'specs': {'replay_profile_string': 'pro',
                            'replicationtype': 'Asynchronous',
                            'activereplay': True},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.100',
                  'cvol': '12345.1',
                  'ovol': '11111.1',
                  'nvol': '11111.3',
                  'rdd': '12345',
                  'status': 'inprogress'},
                 {'volume': volumes[1],
                  'specs': {'replay_profile_string': 'pro',
                            'replicationtype': 'Asynchronous',
                            'activereplay': True},
                  'qosnode': 'cinderqos',
                  'screpl': '12345.200',
                  'cvol': '12345.2',
                  'ovol': '11111.2',
                  'nvol': '11111.4',
                  'rdd': '12345',
                  'status': 'inprogress'}
                 ]
        return items, backends

    def test_wait_for_replication(self,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        items, backends = self._setup_items()
        expected = []
        for item in items:
            expected.append(dict(item))
        expected[0]['status'] = 'synced'
        expected[1]['status'] = 'synced'
        mock_api = mock.Mock()
        mock_api.flip_replication.return_value = True
        mock_api.get_volume.return_value = self.VOLUME
        mock_api.replication_progress.return_value = (True, 0)
        mock_api.rename_volume.return_value = True
        self.driver._wait_for_replication(mock_api, items)
        self.assertEqual(expected, items)
        self.backends = backends

    def test_wait_for_replication_flip_flops(self,
                                             mock_close_connection,
                                             mock_open_connection,
                                             mock_init):
        items, backends = self._setup_items()
        expected = []
        for item in items:
            expected.append(dict(item))
        expected[0]['status'] = 'synced'
        expected[1]['status'] = 'error'
        mock_api = mock.Mock()
        mock_api.flip_replication.side_effect = [True, False]
        mock_api.get_volume.return_value = self.VOLUME
        mock_api.replication_progress.return_value = (True, 0)
        mock_api.rename_volume.return_value = True
        self.driver._wait_for_replication(mock_api, items)
        self.assertEqual(expected, items)
        self.backends = backends

    def test_wait_for_replication_flip_no_vol(self,
                                              mock_close_connection,
                                              mock_open_connection,
                                              mock_init):
        items, backends = self._setup_items()
        expected = []
        for item in items:
            expected.append(dict(item))
        expected[0]['status'] = 'synced'
        expected[1]['status'] = 'error'
        mock_api = mock.Mock()
        mock_api.flip_replication.return_value = True
        mock_api.get_volume.side_effect = [self.VOLUME, self.VOLUME,
                                           self.VOLUME,
                                           self.VOLUME, None]
        mock_api.replication_progress.return_value = (True, 0)
        mock_api.rename_volume.return_value = True
        self.driver._wait_for_replication(mock_api, items)
        self.assertEqual(expected, items)
        self.backends = backends

    def test_wait_for_replication_cant_find_orig(self,
                                                 mock_close_connection,
                                                 mock_open_connection,
                                                 mock_init):
        items, backends = self._setup_items()
        expected = []
        for item in items:
            expected.append(dict(item))
        expected[0]['status'] = 'synced'
        expected[1]['status'] = 'synced'
        mock_api = mock.Mock()
        mock_api.flip_replication.return_value = True
        mock_api.get_volume.side_effect = [self.VOLUME, self.VOLUME,
                                           None,
                                           self.VOLUME, self.VOLUME,
                                           None]
        mock_api.replication_progress.return_value = (True, 0)
        mock_api.rename_volume.return_value = True
        self.driver._wait_for_replication(mock_api, items)
        self.assertEqual(expected, items)
        self.backends = backends

    def test_wait_for_replication_rename_fail(self,
                                              mock_close_connection,
                                              mock_open_connection,
                                              mock_init):
        items, backends = self._setup_items()
        expected = []
        for item in items:
            expected.append(dict(item))
        expected[0]['status'] = 'synced'
        expected[1]['status'] = 'synced'
        mock_api = mock.Mock()
        mock_api.flip_replication.return_value = True
        mock_api.get_volume.return_value = self.VOLUME
        mock_api.replication_progress.return_value = (True, 0)
        mock_api.rename_volume.return_value = True
        self.driver._wait_for_replication(mock_api, items)
        self.assertEqual(expected, items)
        self.backends = backends

    def test_wait_for_replication_timeout(self,
                                          mock_close_connection,
                                          mock_open_connection,
                                          mock_init):
        items, backends = self._setup_items()
        expected = []
        for item in items:
            expected.append(dict(item))
        expected[0]['status'] = 'error'
        expected[1]['status'] = 'error'
        self.assertNotEqual(items, expected)
        mock_api = mock.Mock()
        mock_api.get_volume.side_effect = [self.VOLUME, self.VOLUME,
                                           self.VOLUME,
                                           self.VOLUME, None]
        mock_api.replication_progress.return_value = (False, 500)
        self.driver.failback_timeout = 1
        self.driver._wait_for_replication(mock_api, items)
        self.assertEqual(expected, items)
        self.backends = backends
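
    # Aside (editor's illustrative sketch, not part of this commit): setting
    # failback_timeout = 1 forces the (assumed) bounded polling loop inside
    # _wait_for_replication to expire, along the lines of:
    #
    #     start = time.time()
    #     while time.time() - start < self.failback_timeout:
    #         synced, remaining = api.replication_progress(item['screpl'])
    #         if synced and remaining == 0:
    #             item['status'] = 'synced'
    #             break
    #     if item['status'] != 'synced':
    #         item['status'] = 'error'
    #
    # The exact loop shape is not visible in this diff; only the timeout
    # behavior asserted above is.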

    @mock.patch.object(dell_storagecenter_iscsi.DellStorageCenterISCSIDriver,
                       '_get_volume_extra_specs')
    def test_parse_extraspecs(self,
                              mock_get_volume_extra_specs,
                              mock_close_connection,
                              mock_open_connection,
                              mock_init):
        volume = {'id': fake.VOLUME_ID}
        mock_get_volume_extra_specs.return_value = {}
        ret = self.driver._parse_extraspecs(volume)
        expected = {'replicationtype': 'Asynchronous',
                    'activereplay': False,
                    'storage_profile': None,
                    'replay_profile_string': None}
        self.assertEqual(expected, ret)
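
    # Aside (editor's illustrative note, not part of this commit): with an
    # empty extra-specs dict, _parse_extraspecs evidently falls back to
    # asynchronous replication that does not replicate the active replay and
    # carries no storage or replay profile:
    #
    #     {'replicationtype': 'Asynchronous',
    #      'activereplay': False,
    #      'storage_profile': None,
    #      'replay_profile_string': None}
    #
    # The volume-type keys that would override these defaults live in the
    # driver and are not shown in this diff.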

    def test_get_qos(self,
                     mock_close_connection,
                     mock_open_connection,
                     mock_init):
        backends = self.driver.backends
        self.driver.backends = [{'target_device_id': '12345',
                                 'qosnode': 'cinderqos1'},
                                {'target_device_id': '67890',
                                 'qosnode': 'cinderqos2'}]
        ret = self.driver._get_qos(12345)
        self.assertEqual('cinderqos1', ret)
        ret = self.driver._get_qos(67890)
        self.assertEqual('cinderqos2', ret)
        ret = self.driver._get_qos(11111)
        self.assertIsNone(ret)
        self.driver.backends[0] = {'target_device_id': '12345'}
        ret = self.driver._get_qos(12345)
        self.assertEqual('cinderqos', ret)
        self.driver.backends = backends
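
    # Aside (editor's illustrative sketch, not part of this commit): the
    # lookups above imply _get_qos scans self.driver.backends for a matching
    # target_device_id and falls back to 'cinderqos' when a backend entry has
    # no 'qosnode' key, e.g.:
    #
    #     def _get_qos_sketch(backends, ssn):
    #         for backend in backends:
    #             if int(backend['target_device_id']) == ssn:
    #                 return backend.get('qosnode', 'cinderqos')
    #         return None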

@@ -6020,8 +6020,12 @@ class DellSCSanAPITestCase(test.TestCase):
        destssn = 65495
        expected = 'StorageCenter/ScReplication/%s' % (
            self.SCREPL[0]['instanceId'])
+        expected_payload = {'DeleteDestinationVolume': True,
+                            'RecycleDestinationVolume': False,
+                            'DeleteRestorePoint': True}
        ret = self.scapi.delete_replication(self.VOLUME, destssn)
-        mock_delete.assert_any_call(expected, True)
+        mock_delete.assert_any_call(expected, payload=expected_payload,
+                                    async=True)
        self.assertTrue(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,

@@ -6053,8 +6057,12 @@ class DellSCSanAPITestCase(test.TestCase):
        destssn = 65495
        expected = 'StorageCenter/ScReplication/%s' % (
            self.SCREPL[0]['instanceId'])
+        expected_payload = {'DeleteDestinationVolume': True,
+                            'RecycleDestinationVolume': False,
+                            'DeleteRestorePoint': True}
        ret = self.scapi.delete_replication(self.VOLUME, destssn)
-        mock_delete.assert_any_call(expected, True)
+        mock_delete.assert_any_call(expected, payload=expected_payload,
+                                    async=True)
        self.assertFalse(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,

@@ -6193,7 +6201,7 @@ class DellSCSanAPITestCase(test.TestCase):
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
-        ret = self.scapi._find_repl_volume('guid', 65495)
+        ret = self.scapi.find_repl_volume('guid', 65495)
        self.assertDictEqual(self.SCREPL[0], ret)

    @mock.patch.object(dell_storagecenter_api.HttpClient,

@@ -6208,7 +6216,7 @@ class DellSCSanAPITestCase(test.TestCase):
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
-        ret = self.scapi._find_repl_volume('guid', 65495)
+        ret = self.scapi.find_repl_volume('guid', 65495)
        self.assertIsNone(ret)

    @mock.patch.object(dell_storagecenter_api.HttpClient,

@@ -6223,7 +6231,7 @@ class DellSCSanAPITestCase(test.TestCase):
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
-        ret = self.scapi._find_repl_volume('guid', 65495)
+        ret = self.scapi.find_repl_volume('guid', 65495)
        self.assertIsNone(ret)

    @mock.patch.object(dell_storagecenter_api.HttpClient,

@@ -6234,13 +6242,13 @@ class DellSCSanAPITestCase(test.TestCase):
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
-        ret = self.scapi._find_repl_volume('guid', 65495)
+        ret = self.scapi.find_repl_volume('guid', 65495)
        self.assertIsNone(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'get_screplication')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
-                       '_find_repl_volume')
+                       'find_repl_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_volume')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,

@@ -6474,6 +6482,228 @@ class DellSCSanAPITestCase(test.TestCase):
        ret = self.scapi.unmanage_replay(screplay)
        self.assertFalse(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_replay_list')
    def test_find_common_replay(self,
                                mock_get_replay_list,
                                mock_close_connection,
                                mock_open_connection,
                                mock_init):
        dreplays = [{'globalIndex': '11111.113'},
                    {'globalIndex': '11111.112'},
                    {'globalIndex': '11111.111'}]
        sreplays = [{'globalIndex': '12345.112'},
                    {'globalIndex': '12345.111'},
                    {'globalIndex': '11111.112'},
                    {'globalIndex': '11111.111'}]
        xreplays = [{'globalIndex': '12345.112'},
                    {'globalIndex': '12345.111'}]
        mock_get_replay_list.side_effect = [dreplays, sreplays,
                                            dreplays, xreplays]
        ret = self.scapi.find_common_replay({'instanceId': '12345.1'},
                                            {'instanceId': '11111.1'})
        self.assertEqual({'globalIndex': '11111.112'}, ret)
        ret = self.scapi.find_common_replay(None, {'instanceId': '11111.1'})
        self.assertIsNone(ret)
        ret = self.scapi.find_common_replay({'instanceId': '12345.1'}, None)
        self.assertIsNone(ret)
        ret = self.scapi.find_common_replay({'instanceId': '12345.1'},
                                            {'instanceId': '11111.1'})
        self.assertIsNone(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_find_qos')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json')
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post')
    def test_start_replication(self,
                               mock_post,
                               mock_get_json,
                               mock_find_qos,
                               mock_close_connection,
                               mock_open_connection,
                               mock_init):
        svolume = {'name': 'guida', 'instanceId': '12345.101',
                   'scSerialNumber': 12345}
        dvolume = {'name': 'guidb', 'instanceId': '11111.101',
                   'scSerialNumber': 11111}
        mock_post.return_value = self.RESPONSE_200
        mock_get_json.return_value = {'instanceId': '12345.201'}
        mock_find_qos.return_value = {'instanceId': '12345.1'}
        expected = {'QosNode': '12345.1',
                    'SourceVolume': '12345.101',
                    'StorageCenter': 12345,
                    'ReplicateActiveReplay': False,
                    'Type': 'Asynchronous',
                    'DestinationVolume': '11111.101',
                    'DestinationStorageCenter': 11111}
        ret = self.scapi.start_replication(svolume, dvolume, 'Asynchronous',
                                           'cinderqos', False)
        self.assertEqual(mock_get_json.return_value, ret)
        mock_post.assert_called_once_with('StorageCenter/ScReplication',
                                          expected, True)
        mock_post.return_value = self.RESPONSE_400
        ret = self.scapi.start_replication(svolume, dvolume, 'Asynchronous',
                                           'cinderqos', False)
        self.assertIsNone(ret)
        mock_post.return_value = self.RESPONSE_200
        mock_find_qos.return_value = None
        ret = self.scapi.start_replication(svolume, dvolume, 'Asynchronous',
                                           'cinderqos', False)
        self.assertIsNone(ret)
        mock_find_qos.return_value = {'instanceId': '12345.1'}
        ret = self.scapi.start_replication(None, dvolume, 'Asynchronous',
                                           'cinderqos', False)
        self.assertIsNone(ret)
        ret = self.scapi.start_replication(svolume, None, 'Asynchronous',
                                           'cinderqos', False)
        self.assertIsNone(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'find_common_replay')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'create_replay')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'start_replication')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json')
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'post')
    def test_replicate_to_common(self,
                                 mock_post,
                                 mock_get_json,
                                 mock_start_replication,
                                 mock_create_replay,
                                 mock_find_common_replay,
                                 mock_close_connection,
                                 mock_open_connection,
                                 mock_init):
        creplay = {'instanceId': '11111.201'}
        svolume = {'name': 'guida'}
        dvolume = {'name': 'guidb', 'volumeFolder': {'instanceId': '11111.1'}}
        vvolume = {'name': 'guidc'}
        mock_find_common_replay.return_value = creplay
        mock_post.return_value = self.RESPONSE_200
        mock_get_json.return_value = vvolume
        mock_create_replay.return_value = {'instanceId': '12345.202'}
        mock_start_replication.return_value = {'instanceId': '12345.203'}
        # Simple common test.
        ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos')
        self.assertEqual(mock_start_replication.return_value, ret)
        mock_post.assert_called_once_with(
            'StorageCenter/ScReplay/11111.201/CreateView',
            {'Name': 'fback:guidb',
             'Notes': 'Created by Dell Cinder Driver',
             'VolumeFolder': '11111.1'},
            True)
        mock_create_replay.assert_called_once_with(svolume, 'failback', 600)
        mock_start_replication.assert_called_once_with(svolume, vvolume,
                                                       'Asynchronous',
                                                       'cinderqos',
                                                       False)
        mock_create_replay.return_value = None
        # Unable to create a replay.
        ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos')
        self.assertIsNone(ret)
        mock_create_replay.return_value = {'instanceId': '12345.202'}
        mock_get_json.return_value = None
        # Create view volume fails.
        ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos')
        self.assertIsNone(ret)
        mock_get_json.return_value = vvolume
        mock_post.return_value = self.RESPONSE_400
        # Post call returns an error.
        ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos')
        self.assertIsNone(ret)
        mock_post.return_value = self.RESPONSE_200
        mock_find_common_replay.return_value = None
        # No common replay found.
        ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos')
        self.assertIsNone(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'delete_replication')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'start_replication')
    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       'rename_volume')
    def test_flip_replication(self,
                              mock_rename_volume,
                              mock_start_replication,
                              mock_delete_replication,
                              mock_close_connection,
                              mock_open_connection,
                              mock_init):
        svolume = {'scSerialNumber': '12345.1'}
        dvolume = {'scSerialNumber': '11111.1'}
        name = 'guid'
        replicationtype = 'Synchronous'
        qosnode = 'cinderqos'
        activereplay = True
        mock_delete_replication.return_value = True
        mock_start_replication.return_value = {'instanceId': '11111.101'}
        mock_rename_volume.return_value = True
        # Good run.
        ret = self.scapi.flip_replication(svolume, dvolume, name,
                                          replicationtype, qosnode,
                                          activereplay)
        self.assertTrue(ret)
        mock_delete_replication.assert_called_once_with(svolume, '11111.1',
                                                        False)
        mock_start_replication.assert_called_once_with(dvolume, svolume,
                                                       replicationtype,
                                                       qosnode, activereplay)
        mock_rename_volume.assert_any_call(svolume, 'Cinder repl of guid')
        mock_rename_volume.assert_any_call(dvolume, 'guid')
        mock_rename_volume.return_value = False
        # Unable to rename volumes.
        ret = self.scapi.flip_replication(svolume, dvolume, name,
                                          replicationtype, qosnode,
                                          activereplay)
        self.assertFalse(ret)
        mock_rename_volume.return_value = True
        mock_start_replication.return_value = None
        # Start replication call fails.
        ret = self.scapi.flip_replication(svolume, dvolume, name,
                                          replicationtype, qosnode,
                                          activereplay)
        self.assertFalse(ret)
        mock_delete_replication.return_value = False
        mock_start_replication.return_value = {'instanceId': '11111.101'}
        # Delete old replication call fails.
        ret = self.scapi.flip_replication(svolume, dvolume, name,
                                          replicationtype, qosnode,
                                          activereplay)
        self.assertFalse(ret)

    @mock.patch.object(dell_storagecenter_api.StorageCenterApi,
                       '_get_json')
    @mock.patch.object(dell_storagecenter_api.HttpClient,
                       'get')
    def test_replication_progress(self,
                                  mock_get,
                                  mock_get_json,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        mock_get.return_value = self.RESPONSE_200
        mock_get_json.return_value = {'synced': True,
                                      'amountRemaining': '0 Bytes'}
        # Good run.
        retbool, retnum = self.scapi.replication_progress('11111.101')
        self.assertTrue(retbool)
        self.assertEqual(0.0, retnum)
        # SC replication ID is None.
        retbool, retnum = self.scapi.replication_progress(None)
        self.assertIsNone(retbool)
        self.assertIsNone(retnum)
        mock_get.return_value = self.RESPONSE_400
        # Get progress call fails.
        retbool, retnum = self.scapi.replication_progress('11111.101')
        self.assertIsNone(retbool)
        self.assertIsNone(retnum)
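
    # Aside (editor's illustrative sketch, not part of this commit): given
    # the mocked payload {'synced': True, 'amountRemaining': '0 Bytes'} and
    # the (True, 0.0) result asserted above, replication_progress presumably
    # strips the unit suffix before converting, e.g.:
    #
    #     amount = float(info['amountRemaining'].split(' ', 1)[0])
    #     return info['synced'], amount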


class DellSCSanAPIConnectionTestCase(test.TestCase):

@@ -197,8 +197,17 @@ class HttpClient(object):
                                verify=self.verify), async)

    @utils.retry(exceptions=(requests.ConnectionError,))
-    def delete(self, url, async=False):
-        LOG.debug('delete: %(url)s', {'url': url})
+    def delete(self, url, payload=None, async=False):
+        LOG.debug('delete: %(url)s data: %(payload)s',
+                  {'url': url, 'payload': payload})
+        if payload:
+            return self._rest_ret(
+                self.session.delete(self.__formatUrl(url),
+                                    data=json.dumps(payload,
+                                                    ensure_ascii=False
+                                                    ).encode('utf-8'),
+                                    headers=self._get_header(async),
+                                    verify=self.verify), async)
        return self._rest_ret(
            self.session.delete(self.__formatUrl(url),
                                headers=self._get_header(async),
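
# Aside (editor's illustrative sketch, not part of this commit): a REST
# DELETE normally carries no body, but the Dell SC API accepts delete options
# as a JSON document, which the requests library supports via the data=
# kwarg, e.g.:
#
#     import json
#     import requests
#
#     payload = {'DeleteDestinationVolume': True,
#                'RecycleDestinationVolume': False,
#                'DeleteRestorePoint': True}
#     requests.delete(url,  # 'url' and headers are placeholders here; the
#                           # driver builds both via self.__formatUrl() and
#                           # self._get_header().
#                     data=json.dumps(payload).encode('utf-8'),
#                     headers={'Content-Type': 'application/json'})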

@@ -217,7 +226,7 @@ class StorageCenterApiHelper(object):
        # Now that active_backend_id is set on failover.
        # Use that if set. Mark the backend as failed over.
        self.active_backend_id = active_backend_id
-        self.ssn = self.config.dell_sc_ssn
+        self.primaryssn = self.config.dell_sc_ssn
        self.storage_protocol = storage_protocol
        self.apiversion = '2.0'

@@ -229,9 +238,9 @@ class StorageCenterApiHelper(object):
        """
        connection = None
        LOG.info(_LI('open_connection to %(ssn)s at %(ip)s'),
-                 {'ssn': self.ssn,
+                 {'ssn': self.primaryssn,
                  'ip': self.config.san_ip})
-        if self.ssn:
+        if self.primaryssn:
            """Open connection to REST API."""
            connection = StorageCenterApi(self.config.san_ip,
                                          self.config.dell_sc_api_port,

@@ -244,17 +253,16 @@ class StorageCenterApiHelper(object):
            # about.
            connection.vfname = self.config.dell_sc_volume_folder
            connection.sfname = self.config.dell_sc_server_folder
+            # Our primary SSN doesn't change.
+            connection.primaryssn = self.primaryssn
            if self.storage_protocol == 'FC':
                connection.protocol = 'FibreChannel'
            # Set appropriate ssn and failover state.
            if self.active_backend_id:
                # active_backend_id is a string. Convert to int.
                connection.ssn = int(self.active_backend_id)
-                connection.failed_over = True
            else:
-                connection.ssn = self.ssn
-                connection.failed_over = False
+                connection.ssn = self.primaryssn
            # Open connection.
            connection.open_connection()
            # Save our api version for next time.

@@ -288,9 +296,10 @@ class StorageCenterApi(object):
        2.4.1 - Updated Replication support to V2.1.
        2.5.0 - ManageableSnapshotsVD implemented.
        3.0.0 - ProviderID utilized.
+        3.1.0 - Failback supported.
    """

-    APIDRIVERVERSION = '3.0.0'
+    APIDRIVERVERSION = '3.1.0'

    def __init__(self, host, port, user, password, verify, apiversion):
        """This creates a connection to Dell SC or EM.

@@ -306,6 +315,9 @@ class StorageCenterApi(object):
        self.notes = 'Created by Dell Cinder Driver'
        self.repl_prefix = 'Cinder repl of '
        self.ssn = None
+        # primaryssn is the ssn of the SC we are configured to use. This
+        # doesn't change in the case of a failover.
+        self.primaryssn = None
        self.failed_over = False
        self.vfname = 'openstack'
        self.sfname = 'openstack'

@@ -489,6 +501,8 @@ class StorageCenterApi(object):

        :raises: VolumeBackendAPIException.
        """
+        # Set our failover state.
+        self.failed_over = (self.primaryssn != self.ssn)

        # Login
        payload = {}

@@ -542,14 +556,15 @@ class StorageCenterApi(object):
        :param provider_id: Provider_id from a volume or snapshot object.
        :returns: True/False
        """
+        ret = False
        if provider_id:
            try:
-                if provider_id.split('.')[0] == str(self.ssn):
-                    return True
+                if provider_id.split('.')[0] == six.text_type(self.ssn):
+                    ret = True
            except Exception:
                LOG.error(_LE('_use_provider_id: provider_id %s is invalid!'),
                          provider_id)
-        return False
+        return ret
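
# Aside (editor's illustrative note, not part of this commit): provider_id
# values in this driver take the form '<scSerialNumber>.<device id>', so the
# check above is effectively:
#
#     '12345.101'.split('.')[0] == '12345'   # volume owned by SSN 12345
#
# six.text_type replaces str so the comparison behaves the same on Python 2
# (unicode) and Python 3 (str).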
|
||||
def find_sc(self, ssn=-1):
|
||||
"""Check that the SC is there and being managed by EM.
|
||||
@ -936,20 +951,23 @@ class StorageCenterApi(object):
|
||||
|
||||
return scvolume
|
||||
|
||||
def _get_volume_list(self, name, deviceid, filterbyvfname=True):
|
||||
def _get_volume_list(self, name, deviceid, filterbyvfname=True, ssn=-1):
|
||||
"""Return the specified list of volumes.
|
||||
|
||||
:param name: Volume name.
|
||||
:param deviceid: Volume device ID on the SC backend.
|
||||
:param filterbyvfname: If set to true then this filters by the preset
|
||||
folder name.
|
||||
:param ssn: SSN to search on.
|
||||
:return: Returns the scvolume list or None.
|
||||
"""
|
||||
if ssn == -1:
|
||||
ssn = self.ssn
|
||||
result = None
|
||||
# We need a name or a device ID to find a volume.
|
||||
if name or deviceid:
|
||||
pf = self._get_payload_filter()
|
||||
pf.append('scSerialNumber', self.ssn)
|
||||
pf.append('scSerialNumber', ssn)
|
||||
if name is not None:
|
||||
pf.append('Name', name)
|
||||
if deviceid is not None:
|
||||
@ -1071,7 +1089,7 @@ class StorageCenterApi(object):
|
||||
# If we have an id then delete the volume.
|
||||
if provider_id:
|
||||
r = self.client.delete('StorageCenter/ScVolume/%s' % provider_id,
|
||||
True)
|
||||
async=True)
|
||||
if not self._check_result(r):
|
||||
msg = _('Error deleting volume %(ssn)s: %(volume)s') % {
|
||||
'ssn': self.ssn,
|
||||
@ -1528,8 +1546,7 @@ class StorageCenterApi(object):
|
||||
controller or not.
|
||||
:return: Nothing
|
||||
"""
|
||||
portals.append(address + ':' +
|
||||
six.text_type(port))
|
||||
portals.append(address + ':' + six.text_type(port))
|
||||
iqns.append(iqn)
|
||||
luns.append(lun)
|
||||
|
||||
@ -1694,7 +1711,8 @@ class StorageCenterApi(object):
|
||||
prosrv = profile.get('server')
|
||||
if prosrv is not None and self._get_id(prosrv) == serverid:
|
||||
r = self.client.delete('StorageCenter/ScMappingProfile/%s'
|
||||
% self._get_id(profile), True)
|
||||
% self._get_id(profile),
|
||||
async=True)
|
||||
if self._check_result(r):
|
||||
# Check our result in the json.
|
||||
result = self._get_json(r)
|
||||
@ -1957,7 +1975,7 @@ class StorageCenterApi(object):
|
||||
payload['Name'] = name
|
||||
r = self.client.put('StorageCenter/ScVolume/%s'
|
||||
% self._get_id(scvolume),
|
||||
payload)
|
||||
payload, True)
|
||||
if self._check_result(r):
|
||||
return True
|
||||
|
||||
@ -2044,7 +2062,7 @@ class StorageCenterApi(object):
|
||||
LOG.debug('ScServer delete %s', self._get_id(scserver))
|
||||
if scserver.get('deleteAllowed') is True:
|
||||
r = self.client.delete('StorageCenter/ScServer/%s'
|
||||
% self._get_id(scserver), True)
|
||||
% self._get_id(scserver), async=True)
|
||||
if self._check_result(r):
|
||||
LOG.debug('ScServer deleted.')
|
||||
else:
|
||||
@ -2106,7 +2124,7 @@ class StorageCenterApi(object):
|
||||
"""
|
||||
self.cg_except_on_no_support()
|
||||
r = self.client.delete('StorageCenter/ScReplayProfile/%s' %
|
||||
self._get_id(profile), True)
|
||||
self._get_id(profile), async=True)
|
||||
if self._check_result(r):
|
||||
LOG.info(_LI('Profile %s has been deleted.'),
|
||||
profile.get('name'))
|
||||
@ -2490,14 +2508,17 @@ class StorageCenterApi(object):
|
||||
'newname': newname}
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
|
||||
def _find_qos(self, qosnode):
|
||||
def _find_qos(self, qosnode, ssn=-1):
|
||||
"""Find Dell SC QOS Node entry for replication.
|
||||
|
||||
:param qosnode: Name of qosnode.
|
||||
:param ssn: SSN to search on.
|
||||
:return: scqos node object.
|
||||
"""
|
||||
if ssn == -1:
|
||||
ssn = self.ssn
|
||||
pf = self._get_payload_filter()
|
||||
pf.append('scSerialNumber', self.ssn)
|
||||
pf.append('scSerialNumber', ssn)
|
||||
pf.append('name', qosnode)
|
||||
r = self.client.post('StorageCenter/ScReplicationQosNode/GetList',
|
||||
pf.payload)
|
||||
@ -2509,7 +2530,7 @@ class StorageCenterApi(object):
|
||||
payload = {}
|
||||
payload['LinkSpeed'] = '1 Gbps'
|
||||
payload['Name'] = qosnode
|
||||
payload['StorageCenter'] = self.ssn
|
||||
payload['StorageCenter'] = ssn
|
||||
payload['BandwidthLimited'] = False
|
||||
r = self.client.post('StorageCenter/ScReplicationQosNode',
|
||||
payload, True)
|
||||
@ -2565,17 +2586,23 @@ class StorageCenterApi(object):
|
||||
'ssn': destssn})
|
||||
return None
|
||||
|
||||
def delete_replication(self, scvolume, destssn):
|
||||
def delete_replication(self, scvolume, destssn, deletedestvolume=True):
|
||||
"""Deletes the SC replication object from scvolume to the destssn.
|
||||
|
||||
:param scvolume: Dell SC Volume object.
|
||||
:param destssn: SC the replication is replicating to.S
|
||||
:param destssn: SC the replication is replicating to.
|
||||
:param deletedestvolume: Delete or keep dest volume.
|
||||
:return: True on success. False on fail.
|
||||
"""
|
||||
replication = self.get_screplication(scvolume, destssn)
|
||||
if replication:
|
||||
payload = {}
|
||||
payload['DeleteDestinationVolume'] = deletedestvolume
|
||||
payload['RecycleDestinationVolume'] = False
|
||||
payload['DeleteRestorePoint'] = True
|
||||
r = self.client.delete('StorageCenter/ScReplication/%s' %
|
||||
self._get_id(replication), True)
|
||||
self._get_id(replication), payload=payload,
|
||||
async=True)
|
||||
if self._check_result(r):
|
||||
# check that we whacked the dest volume
|
||||
LOG.info(_LI('Replication %(vol)s to %(dest)s.'),
|
||||
@ -2662,25 +2689,32 @@ class StorageCenterApi(object):
|
||||
'destsc': destssn})
|
||||
return screpl
|
||||
|
||||
def _find_repl_volume(self, guid, destssn, instance_id=None):
|
||||
def find_repl_volume(self, name, destssn, instance_id=None,
|
||||
source=False, destination=True):
|
||||
"""Find our replay destination volume on the destssn.
|
||||
|
||||
:param guid: Volume ID.
|
||||
:param name: Name to search for.
|
||||
:param destssn: Where to look for the volume.
|
||||
:param instance_id: If we know our exact volume ID use that.
|
||||
:param source: Replication source boolen.
|
||||
:param destination: Replication destination boolean.
|
||||
:return: SC Volume object or None
|
||||
"""
|
||||
# Do a normal volume search.
|
||||
pf = self._get_payload_filter()
|
||||
pf.append('scSerialNumber', destssn)
|
||||
pf.append('ReplicationDestination', True)
|
||||
# Are we looking for a replication destination?
|
||||
pf.append('ReplicationDestination', destination)
|
||||
# Are we looking for a replication source?
|
||||
pf.append('ReplicationSource', source)
|
||||
# There is a chance we know the exact volume. If so then use that.
|
||||
if instance_id:
|
||||
pf.append('instanceId', instance_id)
|
||||
else:
|
||||
# Try the name.
|
||||
pf.append('Name', self._repl_name(guid))
|
||||
r = self.client.post('StorageCenter/ScVolume/GetList', pf.payload)
|
||||
pf.append('Name', name)
|
||||
r = self.client.post('StorageCenter/ScVolume/GetList',
|
||||
pf.payload)
|
||||
if self._check_result(r):
|
||||
volumes = self._get_json(r)
|
||||
if len(volumes) == 1:
|
||||
@ -2717,7 +2751,8 @@ class StorageCenterApi(object):
        # if we got our replication volume we can do this nicely.
        if screplication:
            replinstanceid = screplication['destinationVolume']['instanceId']
            screplvol = self._find_repl_volume(volumename, destssn, replinstanceid)
            screplvol = self.find_repl_volume(self._repl_name(volumename),
                                              destssn, replinstanceid)
        # delete_replication fails to delete replication without also
        # stuffing it into the recycle bin.
        # Instead we try to unmap the destination volume which will break
@ -2728,3 +2763,147 @@ class StorageCenterApi(object):
        self.remove_mappings(scvolume)

        return screplvol

    def _get_replay_list(self, scvolume):
        r = self.client.get('StorageCenter/ScVolume/%s/ReplayList'
                            % self._get_id(scvolume))
        if self._check_result(r):
            return self._get_json(r)
        return []

    def find_common_replay(self, svolume, dvolume):
        """Finds the common replay between two volumes.

        This assumes that one volume was replicated from the other. This
        should return the most recent replay.

        :param svolume: Source SC Volume.
        :param dvolume: Destination SC Volume.
        :return: Common replay or None.
        """
        if svolume and dvolume:
            sreplays = self._get_replay_list(svolume)
            dreplays = self._get_replay_list(dvolume)
            for dreplay in dreplays:
                for sreplay in sreplays:
                    if dreplay['globalIndex'] == sreplay['globalIndex']:
                        return dreplay
        return None

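The match is a simple pairing on globalIndex across the two replay lists.
A small illustration with hypothetical, trimmed-down replay payloads:

    # Hypothetical replay lists as _get_replay_list might return them.
    sreplays = [{'globalIndex': '64702.101'}, {'globalIndex': '64702.100'}]
    dreplays = [{'globalIndex': '64702.100'}]
    # find_common_replay walks dreplays in the outer loop, so the shared
    # replay with globalIndex '64702.100' is the one returned.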
    def start_replication(self, svolume, dvolume,
                          replicationtype, qosnode, activereplay):
        """Starts a replication between volumes.

        Requires the dvolume to be in an appropriate state to start this.

        :param svolume: Source SC Volume.
        :param dvolume: Destination SC Volume.
        :param replicationtype: Asynchronous or synchronous.
        :param qosnode: QOS node name.
        :param activereplay: Boolean to replicate the active replay or not.
        :return: ScReplication object or None.
        """
        if svolume and dvolume:
            qos = self._find_qos(qosnode, svolume['scSerialNumber'])
            if qos:
                payload = {}
                payload['QosNode'] = self._get_id(qos)
                payload['SourceVolume'] = self._get_id(svolume)
                payload['StorageCenter'] = svolume['scSerialNumber']
                # Have to replicate the active replay.
                payload['ReplicateActiveReplay'] = activereplay
                payload['Type'] = replicationtype
                payload['DestinationVolume'] = self._get_id(dvolume)
                payload['DestinationStorageCenter'] = dvolume['scSerialNumber']
                r = self.client.post('StorageCenter/ScReplication', payload,
                                     True)
                # 201 expected.
                if self._check_result(r):
                    LOG.info(_LI('Replication created for '
                                 '%(src)s to %(dest)s'),
                             {'src': svolume.get('name'),
                              'dest': dvolume.get('name')})
                    screpl = self._get_json(r)
                    return screpl
        return None

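A sketch of the reverse replication kicked off during failback, assuming
api is an open connection, dvol/svol are the current destination and
source volume objects, and 'cinderqos' is the default QOS node name used
by _get_qos later in this change:

    screpl = api.start_replication(dvol, svol, 'Asynchronous',
                                   'cinderqos', False)
    # A None result means the QOS node lookup or the POST failed; the
    # caller is expected to mark the volume as error.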
    def replicate_to_common(self, svolume, dvolume, qosnode):
        """Reverses a replication between two volumes.

        :param svolume: Failed over volume. (Current)
        :param dvolume: Original source volume.
        :param qosnode: QOS node name to use to create the replay.
        :return: ScReplication object or None.
        """
        # find our common replay.
        creplay = self.find_common_replay(svolume, dvolume)
        # if we found one.
        if creplay:
            # create a view volume from the common replay.
            payload = {}
            # funky name.
            payload['Name'] = 'fback:' + dvolume['name']
            payload['Notes'] = self.notes
            payload['VolumeFolder'] = self._get_id(dvolume['volumeFolder'])
            r = self.client.post('StorageCenter/ScReplay/%s/CreateView'
                                 % self._get_id(creplay), payload, True)
            if self._check_result(r):
                vvolume = self._get_json(r)
                if vvolume:
                    # snap a replay and start replicating.
                    if self.create_replay(svolume, 'failback', 600):
                        return self.start_replication(svolume, vvolume,
                                                      'Asynchronous', qosnode,
                                                      False)
        # No joy. Error the volume.
        return None

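This is the first half of failback: a 'fback:' view volume is carved from
the last common replay and the failed-over volume replicates into it. A
sketch of the call made by failback_volumes below, where cvol/ovol are
placeholders for the current and original volume objects:

    screpl = api.replicate_to_common(cvol, ovol, 'tempqos')
    if screpl:
        # The new 'fback:' view volume becomes the failback target.
        nvolid = screpl['destinationVolume']['instanceId']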
    def flip_replication(self, svolume, dvolume, name,
                         replicationtype, qosnode, activereplay):
        """Enables replication from current destination volume to source.

        :param svolume: Current source. New destination.
        :param dvolume: Current destination. New source.
        :param name: Volume name.
        :param replicationtype: Sync or async
        :param qosnode: qos node for the new source ssn.
        :param activereplay: replicate the active replay.
        :return: True/False.
        """
        # We are flipping a replication. That means there was a replication to
        # start with. Delete that.
        if self.delete_replication(svolume, dvolume['scSerialNumber'], False):
            # Kick off a replication going the other way.
            if self.start_replication(dvolume, svolume, replicationtype,
                                      qosnode, activereplay) is not None:
                # rename
                if (self.rename_volume(svolume, self._repl_name(name)) and
                        self.rename_volume(dvolume, name)):
                    return True
        LOG.warning(_LW('flip_replication: Unable to replicate '
                        '%(name)s from %(src)s to %(dst)s'),
                    {'name': name,
                     'src': dvolume['scSerialNumber'],
                     'dst': svolume['scSerialNumber']})
        return False

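flip_replication chains the helpers above: delete_replication with the
destination kept, start_replication in the opposite direction, then a
name swap so the new source carries the plain volume name. A sketch of
the call _wait_for_replication makes once a volume is synced (cvol, nvol,
volume_id and qosnode are placeholders):

    ok = api.flip_replication(cvol, nvol, volume_id,
                              'Asynchronous', qosnode, True)
    # True means replication now runs nvol -> cvol and both volumes have
    # been renamed to match their new roles.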
    def replication_progress(self, screplid):
"""Get's the current progress of the replication.
|
||||

        :param screplid: instanceId of the ScReplication object.
        :return: Boolean for synced, float of remaining bytes. (Or None, None.)
        """
        if screplid:
            r = self.client.get(
                'StorageCenter/ScReplication/%s/CurrentProgress' % screplid)
            if self._check_result(r):
                progress = self._get_json(r)
                try:
                    remaining = float(
                        progress['amountRemaining'].split(' ', 1)[0])
                    return progress['synced'], remaining
                except Exception:
                    LOG.warning(_LW('replication_progress: Invalid replication'
                                    ' progress information returned: %s'),
                                progress)
        return None, None

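CurrentProgress reports amountRemaining as a unit-qualified string, so
the numeric part is split off the front before conversion. A small worked
example with a hypothetical payload:

    progress = {'synced': False, 'amountRemaining': '512.5 MB'}
    remaining = float(progress['amountRemaining'].split(' ', 1)[0])
    # replication_progress would return (False, 512.5) here.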
@ -12,9 +12,11 @@
#    License for the specific language governing permissions and limitations
#    under the License.

import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six

from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
@ -65,6 +67,7 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
        self.active_backend_id = kwargs.get('active_backend_id', None)
        self.failed_over = (self.active_backend_id is not None)
        self.storage_protocol = 'iSCSI'
        self.failback_timeout = 30

    def _bytes_to_gb(self, spacestring):
        """Space is returned in a string like ...
@ -936,6 +939,10 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
           host['host'] is its name, and host['capabilities'] is a
           dictionary of its reported capabilities (Not Used).
        """
        LOG.info(_LI('retype: volume_name: %(name)s new_type: %(newtype)s '
                     'diff: %(diff)s host: %(host)s'),
                 {'name': volume.get('id'), 'newtype': new_type,
                  'diff': diff, 'host': host})
        model_update = None
        # Any spec changes?
        if diff['extra_specs']:
@ -980,11 +987,11 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
                            'replication_enabled'))
                    # if there is a change and it didn't work fast fail.
                    if current != requested:
                        if requested:
                        if requested == '<is> True':
                            model_update = self._create_replications(api,
                                                                     volume,
                                                                     scvolume)
                        else:
                        elif current == '<is> True':
                            self._delete_replications(api, volume)
                            model_update = {'replication_status': 'disabled',
                                            'replication_driver_data': ''}
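The retype path now compares against the literal '<is> True' spec value
rather than simple truthiness, since diff['extra_specs'] carries
[old, new] pairs where either side may be None. A sketch of the shape it
consumes:

    diff = {'extra_specs': {'replication_enabled': [None, '<is> True']}}
    current, requested = diff['extra_specs']['replication_enabled']
    # current is None and requested is '<is> True', so replication is
    # created; the reversed pair would tear it down instead.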
@ -1044,10 +1051,265 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
        return destssn

    def _update_backend(self, active_backend_id):
        # Update our backend id. On the next open_connection it will use this.
        self.active_backend_id = str(active_backend_id)
        # Mark for failover or undo failover.
        LOG.debug('active_backend_id: %s', active_backend_id)
        if active_backend_id:
            self.active_backend_id = six.text_type(active_backend_id)
            self.failed_over = True
        else:
            self.active_backend_id = None
            self.failed_over = False

        self._client.active_backend_id = self.active_backend_id

    def _get_qos(self, targetssn):
        # Find our QOS.
        qosnode = None
        for backend in self.backends:
            if int(backend['target_device_id']) == targetssn:
                qosnode = backend.get('qosnode', 'cinderqos')
        return qosnode

    def _parse_extraspecs(self, volume):
        # Digest our extra specs.
        extraspecs = {}
        specs = self._get_volume_extra_specs(volume)
        if specs.get('replication_type') == '<in> sync':
            extraspecs['replicationtype'] = 'Synchronous'
        else:
            extraspecs['replicationtype'] = 'Asynchronous'
        if specs.get('replication:activereplay') == '<is> True':
            extraspecs['activereplay'] = True
        else:
            extraspecs['activereplay'] = False
        extraspecs['storage_profile'] = specs.get('storagetype:storageprofile')
        extraspecs['replay_profile_string'] = (
            specs.get('storagetype:replayprofiles'))
        return extraspecs

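A sketch of the digest for a typical replicated volume type (the spec
keys are the Dell SC replication extra specs checked above):

    specs = {'replication_type': '<in> sync',
             'replication:activereplay': '<is> True'}
    # _parse_extraspecs would distill this to:
    # {'replicationtype': 'Synchronous',
    #  'activereplay': True,
    #  'storage_profile': None,
    #  'replay_profile_string': None}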
    def _wait_for_replication(self, api, items):
        # Wait for our replications to resync with their original volumes.
        # We wait for completion, errors or timeout.
        deadcount = 5
        lastremain = 0.0
        # The big wait loop.
        while True:
            # We run until all volumes are synced or in error.
            done = True
            currentremain = 0.0
            # Run the list.
            for item in items:
                # If we have one cooking.
                if item['status'] == 'inprogress':
                    # Is it done?
                    synced, remain = api.replication_progress(item['screpl'])
                    currentremain += remain
                    if synced:
                        # It is! Get our volumes.
                        cvol = api.get_volume(item['cvol'])
                        nvol = api.get_volume(item['nvol'])

                        # Flip replication.
                        if (cvol and nvol and api.flip_replication(
                                cvol, nvol, item['volume']['id'],
                                item['specs']['replicationtype'],
                                item['qosnode'],
                                item['specs']['activereplay'])):
                            # rename the original. Doesn't matter if it
                            # succeeded as we should have the provider_id
                            # of the new volume.
                            ovol = api.get_volume(item['ovol'])
                            if not ovol or not api.rename_volume(
                                    ovol, 'org:' + ovol['name']):
                                # Not a reason to fail but will possibly
                                # cause confusion so warn.
                                LOG.warning(_LW('Unable to locate and rename '
                                                'original volume: %s'),
                                            item['ovol'])
                            item['status'] = 'synced'
                        else:
                            item['status'] = 'error'
                    elif synced is None:
                        # Couldn't get info on this one. Call it baked.
                        item['status'] = 'error'
                    else:
                        # Miles to go before we're done.
                        done = False
            # done? then leave.
            if done:
                break

            # Confirm we are or are not still making progress.
            if lastremain == currentremain:
                # One chance down. Warn user.
                deadcount -= 1
                LOG.warning(_LW('Waiting for replications to complete. '
                                'No progress for 30 seconds. deadcount = %d'),
                            deadcount)
            else:
                # Reset
                lastremain = currentremain
                deadcount = 5

            # If we've used up our 5 chances we error and log.
            if deadcount == 0:
                LOG.error(_LE('Replication progress has stopped.'))
                for item in items:
                    if item['status'] == 'inprogress':
                        LOG.error(_LE('Failback failed for volume: %s. '
                                      'Timeout waiting for replication to '
                                      'sync with original volume.'),
                                  item['volume']['id'])
                        item['status'] = 'error'
                break
            # This is part of an async call so we should be good sleeping here.
            # Have to balance hammering the backend for no good reason with
            # the max timeout for the unit tests. Yeah, silly.
            eventlet.sleep(self.failback_timeout)

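The stall detector gives the resync five sleep intervals with no change
in the total remaining byte count before erroring the stragglers. With
the failback_timeout of 30 seconds set in __init__ above:

    # 5 intervals * 30 s sleep == 150 s of zero progress before the
    # remaining 'inprogress' items are marked 'error'.
    stall_limit_seconds = 5 * 30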
    def _reattach_remaining_replications(self, api, items):
        # Wiffle through our backends and reattach any remaining replication
        # targets.
        for item in items:
            if item['status'] == 'synced':
                svol = api.get_volume(item['nvol'])
                # assume it went well. Will error out if not.
                item['status'] = 'reattached'
                # wiffle through our backends and kick off replications.
                for backend in self.backends:
                    rssn = int(backend['target_device_id'])
                    if rssn != api.ssn:
                        rvol = api.find_repl_volume(item['volume']['id'],
                                                    rssn, None)
                        # if there is an old replication whack it.
                        api.delete_replication(svol, rssn, False)
                        if api.start_replication(
                                svol, rvol,
                                item['specs']['replicationtype'],
                                self._get_qos(rssn),
                                item['specs']['activereplay']):
                            # Save our replication_driver_data.
                            item['rdd'] += ','
                            item['rdd'] += backend['target_device_id']
                        else:
                            # No joy. Bail
                            item['status'] = 'error'

    def _fixup_types(self, api, items):
        # Update our replay profiles.
        for item in items:
            if item['status'] == 'reattached':
                # Re-apply any appropriate replay profiles.
                item['status'] = 'available'
                rps = item['specs']['replay_profile_string']
                if rps:
                    svol = api.get_volume(item['nvol'])
                    if not api.update_replay_profiles(svol, rps):
                        item['status'] = 'error'

    def _volume_updates(self, items):
        # Update our volume updates.
        volume_updates = []
        for item in items:
            # Set our status for our replicated volumes
            model_update = {'provider_id': item['nvol'],
                            'replication_driver_data': item['rdd']}
            # These are simple. If the volume reaches available then,
            # since we were replicating it, replication status must
            # be good. Else error/error.
            if item['status'] == 'available':
                model_update['status'] = 'available'
                model_update['replication_status'] = 'enabled'
            else:
                model_update['status'] = 'error'
                model_update['replication_status'] = 'error'
            volume_updates.append({'volume_id': item['volume']['id'],
                                   'updates': model_update})
        return volume_updates

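A sketch of the update emitted for one successfully failed-back volume
(the IDs are placeholders; 'rdd' accumulates the serial numbers the
volume is replicated to):

    {'volume_id': 'vol-uuid',
     'updates': {'provider_id': '64702.123',
                 'replication_driver_data': '64702,64703',
                 'status': 'available',
                 'replication_status': 'enabled'}}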
    def failback_volumes(self, volumes):
        """This is a generic volume failback.

        :param volumes: List of volumes that need to be failed back.
        :return: volume_updates for the list of volumes.
        """
        LOG.info(_LI('failback_volumes'))
        with self._client.open_connection() as api:
            # Get our qosnode. This is a good way to make sure the backend
            # is still setup so that we can do this.
            qosnode = self._get_qos(api.ssn)
            if not qosnode:
                raise exception.VolumeBackendAPIException(
                    message=_('Unable to failback. Backend is misconfigured.'))

            volume_updates = []
            replitems = []
            screplid = None
            status = ''
            # Trundle through the volumes. Update non replicated to alive again
            # and reverse the replications for the remaining volumes.
            for volume in volumes:
                LOG.info(_LI('failback_volumes: starting volume: %s'), volume)
                model_update = {}
                if volume.get('replication_driver_data'):
                    LOG.info(_LI('failback_volumes: replicated volume'))
                    # Get our current volume.
                    cvol = api.find_volume(volume['id'], volume['provider_id'])
                    # Original volume on the primary.
                    ovol = api.find_repl_volume(volume['id'], api.primaryssn,
                                                None, True, False)
                    # Delete our current mappings.
                    api.remove_mappings(cvol)
                    # If there is a replication to delete do so.
                    api.delete_replication(ovol, api.ssn, False)
                    # Replicate to a common replay.
                    screpl = api.replicate_to_common(cvol, ovol, 'tempqos')
                    # We made it this far. Update our status.
                    if screpl:
                        screplid = screpl['instanceId']
                        nvolid = screpl['destinationVolume']['instanceId']
                        status = 'inprogress'
                    else:
                        LOG.error(_LE('Unable to restore %s'), volume['id'])
                        screplid = None
                        nvolid = None
                        status = 'error'

                    # Save some information for the next step.
                    # nvol is the new volume created by replicate_to_common.
                    # We also grab our extra specs here.
                    replitems.append(
                        {'volume': volume,
                         'specs': self._parse_extraspecs(volume),
                         'qosnode': qosnode,
                         'screpl': screplid,
                         'cvol': cvol['instanceId'],
                         'ovol': ovol['instanceId'],
                         'nvol': nvolid,
                         'rdd': six.text_type(api.ssn),
                         'status': status})
                else:
                    # Not replicated. Just set it to available.
                    model_update = {'status': 'available'}
                    # Either we are failed over or our status is now error.
                    volume_updates.append({'volume_id': volume['id'],
                                           'updates': model_update})

            if replitems:
                # Wait for replication to complete.
                # This will also flip replication.
                self._wait_for_replication(api, replitems)
                # Replications are done. Attach to any additional replication
                # backends.
                self._reattach_remaining_replications(api, replitems)
                self._fixup_types(api, replitems)
                volume_updates += self._volume_updates(replitems)

            # Set us back to a happy state.
            # The only way this doesn't happen is if the primary is down.
            self._update_backend(None)
            return volume_updates

    def failover_host(self, context, volumes, secondary_id=None):
        """Failover to secondary.

@ -1066,10 +1328,16 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
                  'replication_extended_status': 'whatever',...}},]
        """

        # We do not allow failback. Dragons be there.
        LOG.debug('failover-host')
        LOG.debug(self.failed_over)
        LOG.debug(self.active_backend_id)
        LOG.debug(self.replication_enabled)
        if self.failed_over:
            raise exception.VolumeBackendAPIException(message=_(
                'Backend has already been failed over. Unable to fail back.'))
            if secondary_id == 'default':
                LOG.debug('failing back')
                return 'default', self.failback_volumes(volumes)
            raise exception.VolumeBackendAPIException(
                message='Already failed over.')

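With failback wired in, passing the 'default' secondary id to an already
failed-over backend now triggers failback_volumes instead of raising. A
sketch of the two entry points, where ctxt and volumes are placeholders:

    # Fail over to the backend with target_device_id '12345'.
    destssn, updates = driver.failover_host(ctxt, volumes, '12345')
    # Later, return to the primary.
    backend_id, updates = driver.failover_host(ctxt, volumes, 'default')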
        LOG.info(_LI('Failing backend to %s'), secondary_id)
        # basic check
@ -1111,6 +1379,10 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,

                # this is it.
                self._update_backend(destssn)
                LOG.debug('after update backend')
                LOG.debug(self.failed_over)
                LOG.debug(self.active_backend_id)
                LOG.debug(self.replication_enabled)
                return destssn, volume_updates
            else:
                raise exception.InvalidInput(message=(
@ -51,10 +51,11 @@ class DellStorageCenterFCDriver(dell_storagecenter_common.DellCommonDriver,
        2.4.1 - Updated Replication support to V2.1.
        2.5.0 - ManageableSnapshotsVD implemented.
        3.0.0 - ProviderID utilized.
        3.1.0 - Failback Supported.

    """

    VERSION = '3.0.0'
    VERSION = '3.1.0'

    def __init__(self, *args, **kwargs):
        super(DellStorageCenterFCDriver, self).__init__(*args, **kwargs)

@ -50,10 +50,11 @@ class DellStorageCenterISCSIDriver(dell_storagecenter_common.DellCommonDriver,
        2.4.1 - Updated Replication support to V2.1.
        2.5.0 - ManageableSnapshotsVD implemented.
        3.0.0 - ProviderID utilized.
        3.1.0 - Failback Supported.

    """

    VERSION = '3.0.0'
    VERSION = '3.1.0'

    def __init__(self, *args, **kwargs):
        super(DellStorageCenterISCSIDriver, self).__init__(*args, **kwargs)

@ -0,0 +1,4 @@
---
features:
  - Added replication failback support for the Dell SC driver.