NetApp cDOT: Add cheesecake replication support
Add the ability to fail over a given host to a replication target,
either one explicitly provided or one chosen from among those
configured.

DocImpact
Implements: blueprint netapp-cheesecake-replication-support
Co-Authored-By: Clinton Knight <cknight@netapp.com>
Change-Id: I87b92e76d0d5022e9be610b9e237b89417309c05
parent 1a495922ed
commit 294ee65bd3
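A hedged usage note (not part of this change): with replication_device
configured on a backend, an operator drives the new code path with the
python-cinderclient failover command; the host and backend_id values
below are illustrative only.

    cinder failover-host openstack@nfscmode --backend_id dev1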
Changed files:
cinder/opts.py
cinder/tests/unit/volume/drivers/netapp/test_common.py
cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py
cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py
cinder/tests/unit/volume/drivers/netapp/dataontap/performance/
cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_7mode.py
cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py
cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py
cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py
cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py
cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py
cinder/tests/unit/volume/drivers/netapp/dataontap/utils/
cinder/volume/drivers/netapp/
releasenotes/notes/
@@ -226,6 +226,7 @@ def list_opts():
                cinder_volume_drivers_netapp_options.netapp_eseries_opts,
                cinder_volume_drivers_netapp_options.netapp_nfs_extra_opts,
                cinder_volume_drivers_netapp_options.netapp_san_opts,
                cinder_volume_drivers_netapp_options.netapp_replication_opts,
                cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi.
                storwize_svc_iscsi_opts,
                cinder_backup_drivers_glusterfs.glusterfsbackup_service_opts,
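The new option group registered above (netapp_replication_opts) carries
the per-backend replication settings exercised throughout this change:
replication_device and netapp_replication_aggregate_map. A hedged
sketch of wiring them up the same way the unit tests below do, via
oslo.config overrides -- the backend and aggregate names are
illustrative only:

    from oslo_config import cfg

    CONF = cfg.CONF
    # Declare 'backend2' as a replication target of 'backend1'.
    CONF.set_override('replication_device',
                      [{'backend_id': 'backend2'}],
                      group='backend1')
    # Map source aggregates to destination aggregates for provisioning
    # the mirrored FlexVols.
    CONF.set_override('netapp_replication_aggregate_map',
                      [{'backend_id': 'backend2', 'aggr1': 'aggr10'}],
                      group='backend1')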
@@ -68,6 +68,10 @@ FAKE_NA_SERVER_API_1_20.set_vfiler('filer')
FAKE_NA_SERVER_API_1_20.set_vserver('server')
FAKE_NA_SERVER_API_1_20.set_api_version(1, 20)

VOLUME_VSERVER_NAME = 'fake_vserver'
VOLUME_NAMES = ('volume1', 'volume2')
VOLUME_NAME = 'volume1'

FAKE_QUERY = {'volume-attributes': None}
@@ -104,6 +108,20 @@ NO_RECORDS_RESPONSE = etree.XML("""
</results>
""")

VOLUME_GET_NAME_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<volume-attributes>
<volume-id-attributes>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
</volume-id-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {'volume': VOLUME_NAMES[0], 'vserver': VOLUME_VSERVER_NAME})

INVALID_GET_ITER_RESPONSE_NO_ATTRIBUTES = etree.XML("""
<results status="passed">
<num-records>1</num-records>
@@ -697,9 +715,6 @@ VOLUME_GET_ITER_CAPACITY_RESPONSE = etree.XML("""
    'total_size': VOLUME_SIZE_TOTAL,
})

VOLUME_VSERVER_NAME = 'fake_vserver'
VOLUME_NAMES = ('volume1', 'volume2')

VOLUME_GET_ITER_LIST_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
@@ -733,6 +748,7 @@ VOLUME_GET_ITER_SSC_RESPONSE = etree.XML("""
<junction-path>/%(volume)s</junction-path>
<name>%(volume)s</name>
<owning-vserver-name>%(vserver)s</owning-vserver-name>
<type>rw</type>
</volume-id-attributes>
<volume-mirror-attributes>
<is-data-protection-mirror>false</is-data-protection-mirror>
@@ -744,7 +760,15 @@ VOLUME_GET_ITER_SSC_RESPONSE = etree.XML("""
<volume-space-attributes>
<is-space-guarantee-enabled>true</is-space-guarantee-enabled>
<space-guarantee>none</space-guarantee>
<percentage-snapshot-reserve>5</percentage-snapshot-reserve>
<size>12345</size>
</volume-space-attributes>
<volume-snapshot-attributes>
<snapshot-policy>default</snapshot-policy>
</volume-snapshot-attributes>
<volume-language-attributes>
<language-code>en_US</language-code>
</volume-language-attributes>
</volume-attributes>
</attributes-list>
<num-records>1</num-records>
@@ -761,6 +785,11 @@ VOLUME_INFO_SSC = {
    'junction-path': '/%s' % VOLUME_NAMES[0],
    'aggregate': VOLUME_AGGREGATE_NAMES[0],
    'space-guarantee-enabled': True,
    'language': 'en_US',
    'percentage-snapshot-reserve': '5',
    'snapshot-policy': 'default',
    'type': 'rw',
    'size': '12345',
    'space-guarantee': 'none',
    'qos-policy-group': 'fake_qos_policy_group_name',
}
@@ -782,27 +811,6 @@ VOLUME_DEDUPE_INFO_SSC = {
    'dedupe': True,
}

SNAPMIRROR_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapmirror-info>
<destination-location>%(vserver)s:%(volume2)s</destination-location>
<destination-volume>%(volume2)s</destination-volume>
<destination-vserver>%(vserver)s</destination-vserver>
<source-location>%(vserver)s:%(volume1)s</source-location>
<source-volume>%(volume1)s</source-volume>
<source-vserver>%(vserver)s</source-vserver>
</snapmirror-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
    'volume1': VOLUME_NAMES[0],
    'volume2': VOLUME_NAMES[1],
    'vserver': VOLUME_VSERVER_NAME,
})


STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1 = etree.XML("""
<results status="passed">
<attributes-list>
@@ -1123,3 +1131,139 @@ ISCSI_INITIATOR_GET_AUTH_ELEM = etree.XML("""
ISCSI_INITIATOR_AUTH_LIST_INFO_FAILURE = etree.XML("""
<results status="failed" errno="13112" reason="Initiator %s not found,
please use default authentication." />""" % INITIATOR_IQN)

CLUSTER_NAME = 'fake_cluster'
REMOTE_CLUSTER_NAME = 'fake_cluster_2'
CLUSTER_ADDRESS_1 = 'fake_cluster_address'
CLUSTER_ADDRESS_2 = 'fake_cluster_address_2'
VSERVER_NAME = 'fake_vserver'
VSERVER_NAME_2 = 'fake_vserver_2'
SM_SOURCE_VSERVER = 'fake_source_vserver'
SM_SOURCE_VOLUME = 'fake_source_volume'
SM_DEST_VSERVER = 'fake_destination_vserver'
SM_DEST_VOLUME = 'fake_destination_volume'

CLUSTER_PEER_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<cluster-peer-info>
<active-addresses>
<remote-inet-address>%(addr1)s</remote-inet-address>
<remote-inet-address>%(addr2)s</remote-inet-address>
</active-addresses>
<availability>available</availability>
<cluster-name>%(cluster)s</cluster-name>
<cluster-uuid>fake_uuid</cluster-uuid>
<peer-addresses>
<remote-inet-address>%(addr1)s</remote-inet-address>
</peer-addresses>
<remote-cluster-name>%(remote_cluster)s</remote-cluster-name>
<serial-number>fake_serial_number</serial-number>
<timeout>60</timeout>
</cluster-peer-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
    'addr1': CLUSTER_ADDRESS_1,
    'addr2': CLUSTER_ADDRESS_2,
    'cluster': CLUSTER_NAME,
    'remote_cluster': REMOTE_CLUSTER_NAME,
})

CLUSTER_PEER_POLICY_GET_RESPONSE = etree.XML("""
<results status="passed">
<attributes>
<cluster-peer-policy>
<is-unauthenticated-access-permitted>false</is-unauthenticated-access-permitted>
<passphrase-minimum-length>8</passphrase-minimum-length>
</cluster-peer-policy>
</attributes>
</results>
""")

VSERVER_PEER_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<vserver-peer-info>
<applications>
<vserver-peer-application>snapmirror</vserver-peer-application>
</applications>
<peer-cluster>%(cluster)s</peer-cluster>
<peer-state>peered</peer-state>
<peer-vserver>%(vserver2)s</peer-vserver>
<vserver>%(vserver1)s</vserver>
</vserver-peer-info>
</attributes-list>
<num-records>2</num-records>
</results>
""" % {
    'cluster': CLUSTER_NAME,
    'vserver1': VSERVER_NAME,
    'vserver2': VSERVER_NAME_2
})

SNAPMIRROR_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapmirror-info>
<destination-location>%(vserver)s:%(volume2)s</destination-location>
<destination-volume>%(volume2)s</destination-volume>
<destination-volume-node>fake_destination_node</destination-volume-node>
<destination-vserver>%(vserver)s</destination-vserver>
<exported-snapshot>fake_snapshot</exported-snapshot>
<exported-snapshot-timestamp>1442701782</exported-snapshot-timestamp>
<is-constituent>false</is-constituent>
<is-healthy>true</is-healthy>
<lag-time>2187</lag-time>
<last-transfer-duration>109</last-transfer-duration>
<last-transfer-end-timestamp>1442701890</last-transfer-end-timestamp>
<last-transfer-from>test:manila</last-transfer-from>
<last-transfer-size>1171456</last-transfer-size>
<last-transfer-type>initialize</last-transfer-type>
<max-transfer-rate>0</max-transfer-rate>
<mirror-state>snapmirrored</mirror-state>
<newest-snapshot>fake_snapshot</newest-snapshot>
<newest-snapshot-timestamp>1442701782</newest-snapshot-timestamp>
<policy>DPDefault</policy>
<relationship-control-plane>v2</relationship-control-plane>
<relationship-id>ea8bfcc6-5f1d-11e5-8446-123478563412</relationship-id>
<relationship-status>idle</relationship-status>
<relationship-type>data_protection</relationship-type>
<schedule>daily</schedule>
<source-location>%(vserver)s:%(volume1)s</source-location>
<source-volume>%(volume1)s</source-volume>
<source-vserver>%(vserver)s</source-vserver>
<vserver>fake_destination_vserver</vserver>
</snapmirror-info>
</attributes-list>
<num-records>1</num-records>
</results>
""" % {
    'volume1': VOLUME_NAMES[0],
    'volume2': VOLUME_NAMES[1],
    'vserver': VOLUME_VSERVER_NAME,
})

SNAPMIRROR_GET_ITER_FILTERED_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<snapmirror-info>
<destination-vserver>fake_destination_vserver</destination-vserver>
<destination-volume>fake_destination_volume</destination-volume>
<is-healthy>true</is-healthy>
<mirror-state>snapmirrored</mirror-state>
<schedule>daily</schedule>
<source-vserver>fake_source_vserver</source-vserver>
<source-volume>fake_source_volume</source-volume>
</snapmirror-info>
</attributes-list>
<num-records>1</num-records>
</results>
""")

SNAPMIRROR_INITIALIZE_RESULT = etree.XML("""
<results status="passed">
<result-status>succeeded</result-status>
</results>
""")
(File diff suppressed because it is too large.)
@@ -34,6 +34,7 @@ EXPORT_PATH = '/fake/export/path'
NFS_SHARE = '%s:%s' % (SHARE_IP, EXPORT_PATH)
HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, POOL_NAME)
NFS_HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, NFS_SHARE)
AGGREGATE = 'aggr1'
FLEXVOL = 'openstack-flexvol'
NFS_FILE_PATH = 'nfsvol'
PATH = '/vol/%s/%s' % (POOL_NAME, LUN_NAME)
@@ -331,6 +331,16 @@ class PerformanceCmodeLibraryTestCase(test.TestCase):

        self.assertAlmostEqual(expected, result)

    def test__update_for_failover(self):
        self.mock_object(self.perf_library, 'update_performance_cache')
        mock_client = mock.Mock(name='FAKE_ZAPI_CLIENT')

        self.perf_library._update_for_failover(mock_client, self.fake_volumes)

        self.assertEqual(mock_client, self.perf_library.zapi_client)
        self.perf_library.update_performance_cache.assert_called_once_with(
            self.fake_volumes)

    def test_get_aggregates_for_pools(self):

        result = self.perf_library._get_aggregates_for_pools(self.fake_volumes)
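The assertions above pin down the whole contract of
_update_for_failover; a minimal sketch consistent with them (not the
verbatim library code):

    def _update_for_failover(self, zapi_client, ssc_pools):
        # Point the performance library at the failover target's client,
        # then rebuild the counter cache for the surviving pools.
        self.zapi_client = zapi_client
        self.update_performance_cache(ssc_pools)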
@@ -47,7 +47,10 @@ class NetAppBlockStorage7modeLibraryTestCase(test.TestCase):
    def setUp(self):
        super(NetAppBlockStorage7modeLibraryTestCase, self).setUp()

-        kwargs = {'configuration': self.get_config_7mode()}
+        kwargs = {
+            'configuration': self.get_config_7mode(),
+            'host': 'openstack@7modeblock',
+        }
        self.library = block_7mode.NetAppBlockStorage7modeLibrary(
            'driver', 'protocol', **kwargs)
@@ -28,11 +28,12 @@ import uuid
import ddt
import mock
from oslo_log import versionutils
+from oslo_service import loopingcall
from oslo_utils import units
import six

from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _LW
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
@@ -48,7 +49,10 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
    def setUp(self):
        super(NetAppBlockStorageLibraryTestCase, self).setUp()

-        kwargs = {'configuration': self.get_config_base()}
+        kwargs = {
+            'configuration': self.get_config_base(),
+            'host': 'openstack@netappblock',
+        }
        self.library = block_base.NetAppBlockStorageLibrary(
            'driver', 'protocol', **kwargs)
        self.library.zapi_client = mock.Mock()
@@ -740,11 +744,11 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
    def test_setup_error_invalid_lun_os(self):
        self.library.configuration.netapp_lun_ostype = 'unknown'
        self.library.do_setup(mock.Mock())

        self.assertRaises(exception.NetAppDriverException,
                          self.library.check_for_setup_error)
-        msg = _("Invalid value for NetApp configuration"
-                " option netapp_lun_ostype.")
-        block_base.LOG.error.assert_called_once_with(msg)
+        block_base.LOG.error.assert_called_once_with(mock.ANY)

    @mock.patch.object(na_utils, 'check_flags', mock.Mock())
    @mock.patch.object(block_base, 'LOG', mock.Mock())
@@ -756,9 +760,7 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
        self.assertRaises(exception.NetAppDriverException,
                          self.library.check_for_setup_error)

-        msg = _("Invalid value for NetApp configuration"
-                " option netapp_host_type.")
-        block_base.LOG.error.assert_called_once_with(msg)
+        block_base.LOG.error.assert_called_once_with(mock.ANY)

    @mock.patch.object(na_utils, 'check_flags', mock.Mock())
    def test_check_for_setup_error_both_config(self):
@@ -767,9 +769,13 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
        self.library.do_setup(mock.Mock())
        self.zapi_client.get_lun_list.return_value = ['lun1']
        self.library._extract_and_populate_luns = mock.Mock()
        mock_start_periodic_tasks = self.mock_object(
            self.library, '_start_periodic_tasks')
        self.library.check_for_setup_error()

        self.library._extract_and_populate_luns.assert_called_once_with(
            ['lun1'])
        mock_start_periodic_tasks.assert_called_once_with()

    @mock.patch.object(na_utils, 'check_flags', mock.Mock())
    def test_check_for_setup_error_no_os_host(self):
@@ -778,9 +784,29 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
        self.library.do_setup(mock.Mock())
        self.zapi_client.get_lun_list.return_value = ['lun1']
        self.library._extract_and_populate_luns = mock.Mock()
        mock_start_periodic_tasks = self.mock_object(
            self.library, '_start_periodic_tasks')

        self.library.check_for_setup_error()
        self.library._extract_and_populate_luns.assert_called_once_with(
            ['lun1'])
        mock_start_periodic_tasks.assert_called_once_with()

    def test_start_periodic_tasks(self):

        mock_handle_housekeeping_tasks = self.mock_object(
            self.library, '_handle_housekeeping_tasks')

        housekeeping_periodic_task = mock.Mock()
        mock_loopingcall = self.mock_object(
            loopingcall, 'FixedIntervalLoopingCall',
            mock.Mock(return_value=housekeeping_periodic_task))

        self.library._start_periodic_tasks()

        mock_loopingcall.assert_called_once_with(
            mock_handle_housekeeping_tasks)
        self.assertTrue(housekeeping_periodic_task.start.called)
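A minimal sketch of the base-class _start_periodic_tasks the test above
asserts against; hypothetical, and the interval value is an assumption
for illustration:

    def _start_periodic_tasks(self):
        # Run housekeeping (QoS cleanup, SnapMirror upkeep) on a fixed
        # cadence.
        housekeeping_periodic_task = loopingcall.FixedIntervalLoopingCall(
            self._handle_housekeeping_tasks)
        housekeeping_periodic_task.start(interval=600, initial_delay=0)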
    def test_delete_volume(self):
        mock_delete_lun = self.mock_object(self.library, '_delete_lun')
@@ -25,12 +25,16 @@ from oslo_service import loopingcall
from cinder import exception
from cinder import test
import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake
from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes as\
    fake_utils
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import utils as config_utils
from cinder.volume.drivers.netapp import utils as na_utils
@@ -41,7 +45,10 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
    def setUp(self):
        super(NetAppBlockStorageCmodeLibraryTestCase, self).setUp()

-        kwargs = {'configuration': self.get_config_cmode()}
+        kwargs = {
+            'configuration': self.get_config_cmode(),
+            'host': 'openstack@cdotblock',
+        }
        self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
            'driver', 'protocol', **kwargs)
@@ -82,6 +89,9 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
    @mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup')
    def test_do_setup(self, super_do_setup, mock_check_flags):
        self.mock_object(client_base.Client, '_init_ssh_client')
        self.mock_object(
            config_utils, 'get_backend_configuration',
            mock.Mock(return_value=self.get_config_cmode()))
        context = mock.Mock()

        self.library.do_setup(context)
@@ -94,8 +104,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
            block_base.NetAppBlockStorageLibrary, 'check_for_setup_error')
        mock_check_api_permissions = self.mock_object(
            self.library.ssc_library, 'check_api_permissions')
-        mock_start_periodic_tasks = self.mock_object(
-            self.library, '_start_periodic_tasks')
        mock_get_pool_map = self.mock_object(
            self.library, '_get_flexvol_to_pool_map',
            mock.Mock(return_value={'fake_map': None}))
@@ -104,7 +112,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):

        self.assertEqual(1, super_check_for_setup_error.call_count)
        mock_check_api_permissions.assert_called_once_with()
-        self.assertEqual(1, mock_start_periodic_tasks.call_count)
        mock_get_pool_map.assert_called_once_with()

    def test_check_for_setup_error_no_filtered_pools(self):
@@ -112,7 +119,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
                         'check_for_setup_error')
        mock_check_api_permissions = self.mock_object(
            self.library.ssc_library, 'check_api_permissions')
-        self.mock_object(self.library, '_start_periodic_tasks')
        self.mock_object(
            self.library, '_get_flexvol_to_pool_map',
            mock.Mock(return_value={}))
@@ -122,6 +128,51 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):

        mock_check_api_permissions.assert_called_once_with()

    def test_start_periodic_tasks(self):

        mock_update_ssc = self.mock_object(
            self.library, '_update_ssc')
        super_start_periodic_tasks = self.mock_object(
            block_base.NetAppBlockStorageLibrary, '_start_periodic_tasks')

        update_ssc_periodic_task = mock.Mock()
        mock_loopingcall = self.mock_object(
            loopingcall, 'FixedIntervalLoopingCall',
            mock.Mock(return_value=update_ssc_periodic_task))

        self.library._start_periodic_tasks()

        mock_loopingcall.assert_called_once_with(mock_update_ssc)
        self.assertTrue(update_ssc_periodic_task.start.called)
        mock_update_ssc.assert_called_once_with()
        super_start_periodic_tasks.assert_called_once_with()

    @ddt.data({'replication_enabled': True, 'failed_over': False},
              {'replication_enabled': True, 'failed_over': True},
              {'replication_enabled': False, 'failed_over': False})
    @ddt.unpack
    def test_handle_housekeeping_tasks(self, replication_enabled, failed_over):
        ensure_mirrors = self.mock_object(data_motion.DataMotionMixin,
                                          'ensure_snapmirrors')
        self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names',
                         mock.Mock(return_value=fake_utils.SSC.keys()))
        self.library.replication_enabled = replication_enabled
        self.library.failed_over = failed_over
        super_handle_housekeeping_tasks = self.mock_object(
            block_base.NetAppBlockStorageLibrary, '_handle_housekeeping_tasks')

        self.library._handle_housekeeping_tasks()

        super_handle_housekeeping_tasks.assert_called_once_with()
        (self.zapi_client.remove_unused_qos_policy_groups.
            assert_called_once_with())
        if replication_enabled and not failed_over:
            ensure_mirrors.assert_called_once_with(
                self.library.configuration, self.library.backend_name,
                fake_utils.SSC.keys())
        else:
            self.assertFalse(ensure_mirrors.called)
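The ddt cases above fix the housekeeping behavior: mirrors are only
ensured on a replication-enabled backend that has not failed over. A
minimal sketch consistent with those assertions (hypothetical, not the
verbatim driver code):

    def _handle_housekeeping_tasks(self):
        super(NetAppBlockStorageCmodeLibrary,
              self)._handle_housekeeping_tasks()
        # Harvest soft-deleted QoS policy groups.
        self.zapi_client.remove_unused_qos_policy_groups()
        # Only the active primary keeps its SnapMirrors in shape.
        if self.replication_enabled and not self.failed_over:
            self.ensure_snapmirrors(
                self.configuration, self.backend_name,
                self.ssc_library.get_ssc_flexvol_names())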
    def test_find_mapped_lun_igroup(self):
        igroups = [fake.IGROUP1]
        self.zapi_client.get_igroup_by_initiators.return_value = igroups
@@ -590,25 +641,67 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
        self.zapi_client.move_lun.assert_called_once_with(
            '/vol/FAKE_CMODE_VOL1/name', '/vol/FAKE_CMODE_VOL1/volume')

-    def test_start_periodic_tasks(self):
-
-        mock_update_ssc = self.mock_object(
-            self.library, '_update_ssc')
-        mock_remove_unused_qos_policy_groups = self.mock_object(
-            self.zapi_client, 'remove_unused_qos_policy_groups')
-
-        update_ssc_periodic_task = mock.Mock()
-        harvest_qos_periodic_task = mock.Mock()
-        side_effect = [update_ssc_periodic_task, harvest_qos_periodic_task]
-        mock_loopingcall = self.mock_object(
-            loopingcall, 'FixedIntervalLoopingCall',
-            mock.Mock(side_effect=side_effect))
-
-        self.library._start_periodic_tasks()
-
-        mock_loopingcall.assert_has_calls([
-            mock.call(mock_update_ssc),
-            mock.call(mock_remove_unused_qos_policy_groups)])
-        self.assertTrue(update_ssc_periodic_task.start.called)
-        self.assertTrue(harvest_qos_periodic_task.start.called)
-        mock_update_ssc.assert_called_once_with()
+    @ddt.data({'secondary_id': 'dev0', 'configured_targets': ['dev1']},
+              {'secondary_id': 'dev3', 'configured_targets': ['dev1', 'dev2']},
+              {'secondary_id': 'dev1', 'configured_targets': []},
+              {'secondary_id': None, 'configured_targets': []})
+    @ddt.unpack
+    def test_failover_host_invalid_replication_target(self, secondary_id,
+                                                      configured_targets):
+        """This test executes a method in the DataMotionMixin."""
+        self.library.backend_name = 'dev0'
+        self.mock_object(data_motion.DataMotionMixin,
+                         'get_replication_backend_names',
+                         mock.Mock(return_value=configured_targets))
+        complete_failover_call = self.mock_object(
+            data_motion.DataMotionMixin, '_complete_failover')
+
+        self.assertRaises(exception.InvalidReplicationTarget,
+                          self.library.failover_host, 'fake_context', [],
+                          secondary_id=secondary_id)
+        self.assertFalse(complete_failover_call.called)
+
+    def test_failover_host_unable_to_failover(self):
+        """This test executes a method in the DataMotionMixin."""
+        self.library.backend_name = 'dev0'
+        self.mock_object(
+            data_motion.DataMotionMixin, '_complete_failover',
+            mock.Mock(side_effect=exception.NetAppDriverException))
+        self.mock_object(data_motion.DataMotionMixin,
+                         'get_replication_backend_names',
+                         mock.Mock(return_value=['dev1', 'dev2']))
+        self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names',
+                         mock.Mock(return_value=fake_utils.SSC.keys()))
+        self.mock_object(self.library, '_update_zapi_client')
+
+        self.assertRaises(exception.UnableToFailOver,
+                          self.library.failover_host, 'fake_context', [],
+                          secondary_id='dev1')
+        data_motion.DataMotionMixin._complete_failover.assert_called_once_with(
+            'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [],
+            failover_target='dev1')
+        self.assertFalse(self.library._update_zapi_client.called)
+
+    def test_failover_host(self):
+        """This test executes a method in the DataMotionMixin."""
+        self.library.backend_name = 'dev0'
+        self.mock_object(data_motion.DataMotionMixin, '_complete_failover',
+                         mock.Mock(return_value=('dev1', [])))
+        self.mock_object(data_motion.DataMotionMixin,
+                         'get_replication_backend_names',
+                         mock.Mock(return_value=['dev1', 'dev2']))
+        self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names',
+                         mock.Mock(return_value=fake_utils.SSC.keys()))
+        self.mock_object(self.library, '_update_zapi_client')
+
+        actual_active, vol_updates = self.library.failover_host(
+            'fake_context', [], secondary_id='dev1')
+
+        data_motion.DataMotionMixin._complete_failover.assert_called_once_with(
+            'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [],
+            failover_target='dev1')
+        self.library._update_zapi_client.assert_called_once_with('dev1')
+        self.assertTrue(self.library.failed_over)
+        self.assertEqual('dev1', self.library.failed_over_backend_name)
+        self.assertEqual('dev1', actual_active)
+        self.assertEqual([], vol_updates)
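Taken together, the three failover tests above describe the whole
failover_host contract. A minimal sketch consistent with them
(hypothetical; messages and exact control flow are assumptions):

    def failover_host(self, context, volumes, secondary_id=None):
        if secondary_id == self.backend_name:
            msg = _("Cannot fail over to the same host as the primary.")
            raise exception.InvalidReplicationTarget(reason=msg)
        targets = self.get_replication_backend_names(self.configuration)
        if not targets:
            msg = _("No replication targets configured.")
            raise exception.InvalidReplicationTarget(reason=msg)
        if secondary_id and secondary_id not in targets:
            msg = _("%s is not a configured replication target.")
            raise exception.InvalidReplicationTarget(
                reason=msg % secondary_id)
        flexvols = self.ssc_library.get_ssc_flexvol_names()
        try:
            active_backend_name, volume_updates = self._complete_failover(
                self.backend_name, targets, flexvols, volumes,
                failover_target=secondary_id)
        except exception.NetAppDriverException as e:
            raise exception.UnableToFailOver(reason=six.text_type(e))
        # Swap the ZAPI client over to the new active backend.
        self._update_zapi_client(active_backend_name)
        self.failed_over = True
        self.failed_over_backend_name = active_backend_name
        return active_backend_name, volume_updates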
@@ -33,7 +33,10 @@ class NetApp7modeNfsDriverTestCase(test.TestCase):
    def setUp(self):
        super(NetApp7modeNfsDriverTestCase, self).setUp()

-        kwargs = {'configuration': self.get_config_7mode()}
+        kwargs = {
+            'configuration': self.get_config_7mode(),
+            'host': 'openstack@7modenfs',
+        }

        with mock.patch.object(utils, 'get_root_helper',
                               return_value=mock.Mock()):
@@ -25,6 +25,7 @@ import ddt
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_concurrency import processutils
+from oslo_service import loopingcall
from oslo_utils import units
import shutil
@@ -56,7 +57,10 @@ class NetAppNfsDriverTestCase(test.TestCase):
        self.fake_mount_point = fake.MOUNT_POINT
        self.ctxt = context.RequestContext('fake', 'fake', auth_token=True)

-        kwargs = {'configuration': configuration}
+        kwargs = {
+            'configuration': configuration,
+            'host': 'openstack@netappnfs',
+        }

        with mock.patch.object(utils, 'get_root_helper',
                               return_value=mock.Mock()):
@@ -92,6 +96,33 @@ class NetAppNfsDriverTestCase(test.TestCase):
        self.assertEqual(expected_reserved_percentage,
                         round(result['reserved_percentage']))

    def test_check_for_setup_error(self):
        super_check_for_setup_error = self.mock_object(
            nfs.NfsDriver, 'check_for_setup_error')
        mock_start_periodic_tasks = self.mock_object(
            self.driver, '_start_periodic_tasks')

        self.driver.check_for_setup_error()

        super_check_for_setup_error.assert_called_once_with()
        mock_start_periodic_tasks.assert_called_once_with()

    def test_start_periodic_tasks(self):

        mock_handle_housekeeping_tasks = self.mock_object(
            self.driver, '_handle_housekeeping_tasks')

        housekeeping_periodic_task = mock.Mock()
        mock_loopingcall = self.mock_object(
            loopingcall, 'FixedIntervalLoopingCall',
            mock.Mock(return_value=housekeeping_periodic_task))

        self.driver._start_periodic_tasks()

        mock_loopingcall.assert_called_once_with(
            mock_handle_housekeeping_tasks)
        self.assertTrue(housekeeping_periodic_task.start.called)

    def test_get_capacity_info_ipv4_share(self):
        expected = fake.CAPACITY_VALUES
        self.driver.zapi_client = mock.Mock()
@@ -35,6 +35,8 @@ from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap import nfs_cmode
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
+from cinder.volume.drivers.netapp.dataontap.utils import data_motion
+from cinder.volume.drivers.netapp.dataontap.utils import utils as config_utils
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers import nfs
from cinder.volume import utils as volume_utils
@@ -45,7 +47,10 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
    def setUp(self):
        super(NetAppCmodeNfsDriverTestCase, self).setUp()

-        kwargs = {'configuration': self.get_config_cmode()}
+        kwargs = {
+            'configuration': self.get_config_cmode(),
+            'host': 'openstack@nfscmode',
+        }

        with mock.patch.object(utils, 'get_root_helper',
                               return_value=mock.Mock()):
@@ -72,11 +77,42 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
        config.netapp_copyoffload_tool_path = 'copyoffload_tool_path'
        return config

    @ddt.data({'active_backend_id': None, 'targets': ['dev1', 'dev2']},
              {'active_backend_id': None, 'targets': []},
              {'active_backend_id': 'dev1', 'targets': []},
              {'active_backend_id': 'dev1', 'targets': ['dev1', 'dev2']})
    @ddt.unpack
    def test_init_driver_for_replication(self, active_backend_id,
                                         targets):
        kwargs = {
            'configuration': self.get_config_cmode(),
            'host': 'openstack@nfscmode',
            'active_backend_id': active_backend_id,
        }
        self.mock_object(data_motion.DataMotionMixin,
                         'get_replication_backend_names',
                         mock.Mock(return_value=targets))
        with mock.patch.object(utils, 'get_root_helper',
                               return_value=mock.Mock()):
            with mock.patch.object(remotefs_brick, 'RemoteFsClient',
                                   return_value=mock.Mock()):
                nfs_driver = nfs_cmode.NetAppCmodeNfsDriver(**kwargs)

        self.assertEqual(active_backend_id,
                         nfs_driver.failed_over_backend_name)
        self.assertEqual(active_backend_id is not None,
                         nfs_driver.failed_over)
        self.assertEqual(len(targets) > 0,
                         nfs_driver.replication_enabled)
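A minimal sketch of the initialization logic these four ddt cases pin
down; hypothetical, not the verbatim driver code:

    # Inside the driver's __init__, under the stated assumptions:
    active_backend_id = kwargs.get('active_backend_id')
    self.failed_over_backend_name = active_backend_id
    self.failed_over = active_backend_id is not None
    self.replication_enabled = bool(
        self.get_replication_backend_names(self.configuration))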
    @mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock())
    @mock.patch.object(client_cmode, 'Client', mock.Mock())
    @mock.patch.object(nfs.NfsDriver, 'do_setup')
    @mock.patch.object(na_utils, 'check_flags')
    def test_do_setup(self, mock_check_flags, mock_super_do_setup):
        self.mock_object(
            config_utils, 'get_backend_configuration',
            mock.Mock(return_value=self.get_config_cmode()))
        self.driver.do_setup(mock.Mock())

        self.assertTrue(mock_check_flags.called)
@@ -94,6 +130,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
            'driver_version': self.driver.VERSION,
            'pools': {},
            'sparse_copy_volume': True,
            'replication_enabled': False,
            'storage_protocol': 'nfs',
            'vendor_name': 'NetApp',
            'volume_backend_name': 'NetApp_NFS_Cluster_direct',
@@ -342,8 +379,6 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
    def test_check_for_setup_error(self):
        super_check_for_setup_error = self.mock_object(
            nfs_base.NetAppNfsDriver, 'check_for_setup_error')
-        mock_start_periodic_tasks = self.mock_object(
-            self.driver, '_start_periodic_tasks')
        mock_check_api_permissions = self.mock_object(
            self.driver.ssc_library, 'check_api_permissions')
@@ -351,7 +386,51 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):

        self.assertEqual(1, super_check_for_setup_error.call_count)
        mock_check_api_permissions.assert_called_once_with()
-        self.assertEqual(1, mock_start_periodic_tasks.call_count)

    def test_start_periodic_tasks(self):

        mock_update_ssc = self.mock_object(
            self.driver, '_update_ssc')
        super_start_periodic_tasks = self.mock_object(
            nfs_base.NetAppNfsDriver, '_start_periodic_tasks')

        update_ssc_periodic_task = mock.Mock()
        mock_loopingcall = self.mock_object(
            loopingcall, 'FixedIntervalLoopingCall',
            mock.Mock(return_value=update_ssc_periodic_task))

        self.driver._start_periodic_tasks()

        mock_loopingcall.assert_called_once_with(mock_update_ssc)
        self.assertTrue(update_ssc_periodic_task.start.called)
        mock_update_ssc.assert_called_once_with()
        super_start_periodic_tasks.assert_called_once_with()

    @ddt.data({'replication_enabled': True, 'failed_over': False},
              {'replication_enabled': True, 'failed_over': True},
              {'replication_enabled': False, 'failed_over': False})
    @ddt.unpack
    def test_handle_housekeeping_tasks(self, replication_enabled, failed_over):
        ensure_mirrors = self.mock_object(data_motion.DataMotionMixin,
                                          'ensure_snapmirrors')
        self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names',
                         mock.Mock(return_value=fake_ssc.SSC.keys()))
        self.driver.replication_enabled = replication_enabled
        self.driver.failed_over = failed_over
        super_handle_housekeeping_tasks = self.mock_object(
            nfs_base.NetAppNfsDriver, '_handle_housekeeping_tasks')

        self.driver._handle_housekeeping_tasks()

        super_handle_housekeeping_tasks.assert_called_once_with()
        (self.driver.zapi_client.remove_unused_qos_policy_groups.
            assert_called_once_with())
        if replication_enabled and not failed_over:
            ensure_mirrors.assert_called_once_with(
                self.driver.configuration, self.driver.backend_name,
                fake_ssc.SSC.keys())
        else:
            self.assertFalse(ensure_mirrors.called)

    def test_delete_volume(self):
        fake_provider_location = 'fake_provider_location'
@@ -836,29 +915,6 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
        mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)])
        super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)])

-    def test_start_periodic_tasks(self):
-
-        mock_update_ssc = self.mock_object(self.driver, '_update_ssc')
-        mock_remove_unused_qos_policy_groups = self.mock_object(
-            self.driver.zapi_client,
-            'remove_unused_qos_policy_groups')
-
-        update_ssc_periodic_task = mock.Mock()
-        harvest_qos_periodic_task = mock.Mock()
-        side_effect = [update_ssc_periodic_task, harvest_qos_periodic_task]
-        mock_loopingcall = self.mock_object(
-            loopingcall, 'FixedIntervalLoopingCall',
-            mock.Mock(side_effect=side_effect))
-
-        self.driver._start_periodic_tasks()
-
-        mock_loopingcall.assert_has_calls([
-            mock.call(mock_update_ssc),
-            mock.call(mock_remove_unused_qos_policy_groups)])
-        self.assertTrue(update_ssc_periodic_task.start.called)
-        self.assertTrue(harvest_qos_periodic_task.start.called)
-        mock_update_ssc.assert_called_once_with()

    @ddt.data({'has_space': True, 'type_match': True, 'expected': True},
              {'has_space': True, 'type_match': False, 'expected': False},
              {'has_space': False, 'type_match': True, 'expected': False},
@@ -1220,3 +1276,68 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
        self.driver._copy_from_remote_cache.assert_called_once_with(
            fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0])
        self.assertFalse(self.driver._post_clone_image.called)

    @ddt.data({'secondary_id': 'dev0', 'configured_targets': ['dev1']},
              {'secondary_id': 'dev3', 'configured_targets': ['dev1', 'dev2']},
              {'secondary_id': 'dev1', 'configured_targets': []},
              {'secondary_id': None, 'configured_targets': []})
    @ddt.unpack
    def test_failover_host_invalid_replication_target(self, secondary_id,
                                                      configured_targets):
        """This test executes a method in the DataMotionMixin."""
        self.driver.backend_name = 'dev0'
        self.mock_object(data_motion.DataMotionMixin,
                         'get_replication_backend_names',
                         mock.Mock(return_value=configured_targets))
        complete_failover_call = self.mock_object(
            data_motion.DataMotionMixin, '_complete_failover')

        self.assertRaises(exception.InvalidReplicationTarget,
                          self.driver.failover_host, 'fake_context', [],
                          secondary_id=secondary_id)
        self.assertFalse(complete_failover_call.called)

    def test_failover_host_unable_to_failover(self):
        """This test executes a method in the DataMotionMixin."""
        self.driver.backend_name = 'dev0'
        self.mock_object(
            data_motion.DataMotionMixin, '_complete_failover',
            mock.Mock(side_effect=exception.NetAppDriverException))
        self.mock_object(data_motion.DataMotionMixin,
                         'get_replication_backend_names',
                         mock.Mock(return_value=['dev1', 'dev2']))
        self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names',
                         mock.Mock(return_value=fake_ssc.SSC.keys()))
        self.mock_object(self.driver, '_update_zapi_client')

        self.assertRaises(exception.UnableToFailOver,
                          self.driver.failover_host, 'fake_context', [],
                          secondary_id='dev1')
        data_motion.DataMotionMixin._complete_failover.assert_called_once_with(
            'dev0', ['dev1', 'dev2'], fake_ssc.SSC.keys(), [],
            failover_target='dev1')
        self.assertFalse(self.driver._update_zapi_client.called)

    def test_failover_host(self):
        """This test executes a method in the DataMotionMixin."""
        self.driver.backend_name = 'dev0'
        self.mock_object(data_motion.DataMotionMixin, '_complete_failover',
                         mock.Mock(return_value=('dev1', [])))
        self.mock_object(data_motion.DataMotionMixin,
                         'get_replication_backend_names',
                         mock.Mock(return_value=['dev1', 'dev2']))
        self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names',
                         mock.Mock(return_value=fake_ssc.SSC.keys()))
        self.mock_object(self.driver, '_update_zapi_client')

        actual_active, vol_updates = self.driver.failover_host(
            'fake_context', [], secondary_id='dev1')

        data_motion.DataMotionMixin._complete_failover.assert_called_once_with(
            'dev0', ['dev1', 'dev2'], fake_ssc.SSC.keys(), [],
            failover_target='dev1')
        self.driver._update_zapi_client.assert_called_once_with('dev1')
        self.assertTrue(self.driver.failed_over)
        self.assertEqual('dev1', self.driver.failed_over_backend_name)
        self.assertEqual('dev1', actual_active)
        self.assertEqual([], vol_updates)
@@ -13,6 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.

from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.netapp import options as na_opts

SSC_VSERVER = 'fake_vserver'
SSC_VOLUMES = ('volume1', 'volume2')
SSC_VOLUME_MAP = {
@@ -101,3 +105,31 @@ SSC_AGGREGATE_INFO = {
        'netapp_hybrid_aggregate': True,
    },
}

PROVISIONING_OPTS = {
    'aggregate': 'fake_aggregate',
    'thin_provisioned': True,
    'snapshot_policy': None,
    'language': 'en_US',
    'dedupe_enabled': False,
    'compression_enabled': False,
    'snapshot_reserve': '12',
    'volume_type': 'rw',
    'size': 20,
}


def get_fake_cmode_config(backend_name):

    config = configuration.Configuration(driver.volume_opts,
                                         config_group=backend_name)
    config.append_config_values(na_opts.netapp_proxy_opts)
    config.append_config_values(na_opts.netapp_connection_opts)
    config.append_config_values(na_opts.netapp_transport_opts)
    config.append_config_values(na_opts.netapp_basicauth_opts)
    config.append_config_values(na_opts.netapp_provisioning_opts)
    config.append_config_values(na_opts.netapp_cluster_opts)
    config.append_config_values(na_opts.netapp_san_opts)
    config.append_config_values(na_opts.netapp_replication_opts)

    return config
@@ -153,6 +153,16 @@ class CapabilitiesLibraryTestCase(test.TestCase):
        mock_get_ssc_aggregate_info.assert_has_calls([
            mock.call('aggr1'), mock.call('aggr2')])

    def test__update_for_failover(self):
        self.mock_object(self.ssc_library, 'update_ssc')
        flexvol_map = {'volume1': fake.SSC_VOLUME_MAP['volume1']}
        mock_client = mock.Mock(name='FAKE_ZAPI_CLIENT')

        self.ssc_library._update_for_failover(mock_client, flexvol_map)

        self.assertEqual(mock_client, self.ssc_library.zapi_client)
        self.ssc_library.update_ssc.assert_called_once_with(flexvol_map)

    @ddt.data({'lun_space_guarantee': True},
              {'lun_space_guarantee': False})
    @ddt.unpack
@@ -0,0 +1,749 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

import copy
import ddt
import mock
from oslo_config import cfg

from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import utils
from cinder.volume.drivers.netapp import options as na_opts


CONF = cfg.CONF


@ddt.ddt
class NetAppCDOTDataMotionMixinTestCase(test.TestCase):

    def setUp(self):
        super(NetAppCDOTDataMotionMixinTestCase, self).setUp()
        self.dm_mixin = data_motion.DataMotionMixin()
        self.src_backend = 'backend1'
        self.dest_backend = 'backend2'
        self.src_vserver = 'source_vserver'
        self.dest_vserver = 'dest_vserver'
        self._setup_mock_config()
        self.mock_cmode_client = self.mock_object(client_cmode, 'Client')
        self.src_flexvol_name = 'volume_c02d497a_236c_4852_812a_0d39373e312a'
        self.dest_flexvol_name = self.src_flexvol_name
        self.mock_src_client = mock.Mock()
        self.mock_dest_client = mock.Mock()
        self.config = fakes.get_fake_cmode_config(self.src_backend)
        self.mock_object(utils, 'get_backend_configuration',
                         mock.Mock(side_effect=[self.mock_dest_config,
                                                self.mock_src_config]))
        self.mock_object(utils, 'get_client_for_backend',
                         mock.Mock(side_effect=[self.mock_dest_client,
                                                self.mock_src_client]))

    def _setup_mock_config(self):
        self.mock_src_config = configuration.Configuration(
            driver.volume_opts, config_group=self.src_backend)
        self.mock_dest_config = configuration.Configuration(
            driver.volume_opts, config_group=self.dest_backend)

        for config in (self.mock_src_config, self.mock_dest_config):
            config.append_config_values(na_opts.netapp_proxy_opts)
            config.append_config_values(na_opts.netapp_connection_opts)
            config.append_config_values(na_opts.netapp_transport_opts)
            config.append_config_values(na_opts.netapp_basicauth_opts)
            config.append_config_values(na_opts.netapp_provisioning_opts)
            config.append_config_values(na_opts.netapp_cluster_opts)
            config.append_config_values(na_opts.netapp_san_opts)
            config.append_config_values(na_opts.netapp_replication_opts)
            config.netapp_snapmirror_quiesce_timeout = 10

        CONF.set_override('netapp_vserver', self.src_vserver,
                          group=self.src_backend, enforce_type=True)
        CONF.set_override('netapp_vserver', self.dest_vserver,
                          group=self.dest_backend, enforce_type=True)

    @ddt.data(None, [], [{'some_key': 'some_value'}])
    def test_get_replication_backend_names_none(self, replication_device):
        CONF.set_override('replication_device', replication_device,
                          group=self.src_backend, enforce_type=True)

        devices = self.dm_mixin.get_replication_backend_names(self.config)

        self.assertEqual(0, len(devices))

    @ddt.data([{'backend_id': 'xyzzy'}, {'backend_id': 'spoon!'}],
              [{'backend_id': 'foobar'}])
    def test_get_replication_backend_names_valid(self, replication_device):
        CONF.set_override('replication_device', replication_device,
                          group=self.src_backend, enforce_type=True)

        devices = self.dm_mixin.get_replication_backend_names(self.config)

        self.assertEqual(len(replication_device), len(devices))

    def test_get_snapmirrors(self):
        self.mock_object(self.mock_dest_client, 'get_snapmirrors')

        self.dm_mixin.get_snapmirrors(self.src_backend,
                                      self.dest_backend,
                                      self.src_flexvol_name,
                                      self.dest_flexvol_name)

        self.mock_dest_client.get_snapmirrors.assert_called_with(
            self.src_vserver, self.src_flexvol_name,
            self.dest_vserver, self.dest_flexvol_name,
            desired_attributes=['relationship-status',
                                'mirror-state',
                                'source-vserver',
                                'source-volume',
                                'destination-vserver',
                                'destination-volume',
                                'last-transfer-end-timestamp',
                                'lag-time'])
        self.assertEqual(1, self.mock_dest_client.get_snapmirrors.call_count)

    @ddt.data([], ['backend1'], ['backend1', 'backend2'])
    def test_get_replication_backend_stats(self, replication_backend_names):
        self.mock_object(self.dm_mixin, 'get_replication_backend_names',
                         mock.Mock(return_value=replication_backend_names))
        enabled_stats = {
            'replication_count': len(replication_backend_names),
            'replication_targets': replication_backend_names,
            'replication_type': 'async',
        }
        expected_stats = {
            'replication_enabled': len(replication_backend_names) > 0,
        }
        if len(replication_backend_names) > 0:
            expected_stats.update(enabled_stats)

        actual_stats = self.dm_mixin.get_replication_backend_stats(self.config)

        self.assertDictMatch(expected_stats, actual_stats)
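A minimal sketch of get_replication_backend_stats consistent with the
expected_stats constructed above (hypothetical, not the verbatim mixin
code):

    def get_replication_backend_stats(self, config):
        backend_names = self.get_replication_backend_names(config)
        stats = {'replication_enabled': len(backend_names) > 0}
        if stats['replication_enabled']:
            stats.update({
                'replication_count': len(backend_names),
                'replication_targets': backend_names,
                'replication_type': 'async',
            })
        return stats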
    @ddt.data(None, [],
              [{'backend_id': 'replication_backend_2', 'aggr2': 'aggr20'}])
    def test_get_replication_aggregate_map_none(self, replication_aggr_map):

        self.mock_object(utils, 'get_backend_configuration',
                         mock.Mock(return_value=self.config))
        CONF.set_override('netapp_replication_aggregate_map',
                          replication_aggr_map,
                          group=self.src_backend, enforce_type=True)

        aggr_map = self.dm_mixin._get_replication_aggregate_map(
            self.src_backend, 'replication_backend_1')

        self.assertEqual(0, len(aggr_map))

    @ddt.data([{'backend_id': 'replication_backend_1', 'aggr1': 'aggr10'}],
              [{'backend_id': 'replication_backend_1', 'aggr1': 'aggr10'},
               {'backend_id': 'replication_backend_2', 'aggr2': 'aggr20'}])
    def test_get_replication_aggregate_map_valid(self, replication_aggr_map):
        self.mock_object(utils, 'get_backend_configuration',
                         mock.Mock(return_value=self.config))
        CONF.set_override('netapp_replication_aggregate_map',
                          replication_aggr_map, group=self.src_backend,
                          enforce_type=True)

        aggr_map = self.dm_mixin._get_replication_aggregate_map(
            self.src_backend, 'replication_backend_1')

        self.assertDictMatch({'aggr1': 'aggr10'}, aggr_map)
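The two aggregate-map tests above imply a simple lookup: find the
configured entry whose backend_id matches the failover target and
return the remaining keys as the source-to-destination aggregate
mapping. A minimal sketch (hypothetical):

    def _get_replication_aggregate_map(self, src_backend_name,
                                       target_backend_name):
        config = utils.get_backend_configuration(src_backend_name)
        aggregate_map = config.netapp_replication_aggregate_map or []
        for entry in aggregate_map:
            if entry.get('backend_id') == target_backend_name:
                entry = dict(entry)
                entry.pop('backend_id')
                return entry
        return {}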
    @ddt.data(True, False)
    def test_create_snapmirror_dest_flexvol_exists(self, dest_exists):
        mock_dest_client = mock.Mock()
        self.mock_object(mock_dest_client, 'flexvol_exists',
                         mock.Mock(return_value=dest_exists))
        self.mock_object(mock_dest_client, 'get_snapmirrors',
                         mock.Mock(return_value=None))
        create_destination_flexvol = self.mock_object(
            self.dm_mixin, 'create_destination_flexvol')
        self.mock_object(utils, 'get_client_for_backend',
                         mock.Mock(return_value=mock_dest_client))

        self.dm_mixin.create_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)

        if not dest_exists:
            create_destination_flexvol.assert_called_once_with(
                self.src_backend, self.dest_backend, self.src_flexvol_name,
                self.dest_flexvol_name)
        else:
            self.assertFalse(create_destination_flexvol.called)
        mock_dest_client.create_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name, schedule='hourly')
        mock_dest_client.initialize_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name)

    @ddt.data('uninitialized', 'broken-off', 'snapmirrored')
    def test_create_snapmirror_snapmirror_exists_state(self, mirror_state):
        mock_dest_client = mock.Mock()
        existing_snapmirrors = [{'mirror-state': mirror_state}]
        self.mock_object(self.dm_mixin, 'create_destination_flexvol')
        self.mock_object(utils, 'get_client_for_backend',
                         mock.Mock(return_value=mock_dest_client))
        self.mock_object(mock_dest_client, 'flexvol_exists',
                         mock.Mock(return_value=True))
        self.mock_object(mock_dest_client, 'get_snapmirrors',
                         mock.Mock(return_value=existing_snapmirrors))

        self.dm_mixin.create_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)

        self.assertFalse(mock_dest_client.create_snapmirror.called)
        self.assertFalse(mock_dest_client.initialize_snapmirror.called)
        self.assertFalse(self.dm_mixin.create_destination_flexvol.called)
        if mirror_state == 'snapmirrored':
            self.assertFalse(mock_dest_client.resume_snapmirror.called)
            self.assertFalse(mock_dest_client.resync_snapmirror.called)
        else:
            mock_dest_client.resume_snapmirror.assert_called_once_with(
                self.src_vserver, self.src_flexvol_name,
                self.dest_vserver, self.dest_flexvol_name)
            mock_dest_client.resume_snapmirror.assert_called_once_with(
                self.src_vserver, self.src_flexvol_name,
                self.dest_vserver, self.dest_flexvol_name)
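The two create tests above pin down the create_snapmirror flow. A
minimal sketch consistent with them (hypothetical; the vserver lookups
from the backend configs are elided):

    def create_snapmirror(self, src_backend, dest_backend,
                          src_flexvol, dest_flexvol):
        dest_client = utils.get_client_for_backend(dest_backend)
        # Provision the destination FlexVol if it does not exist yet.
        if not dest_client.flexvol_exists(dest_flexvol):
            self.create_destination_flexvol(
                src_backend, dest_backend, src_flexvol, dest_flexvol)
        mirrors = dest_client.get_snapmirrors(
            src_vserver, src_flexvol, dest_vserver, dest_flexvol)
        if not mirrors:
            # No relationship yet: create it and run the baseline copy.
            dest_client.create_snapmirror(src_vserver, src_flexvol,
                                          dest_vserver, dest_flexvol,
                                          schedule='hourly')
            dest_client.initialize_snapmirror(src_vserver, src_flexvol,
                                              dest_vserver, dest_flexvol)
        elif mirrors[0].get('mirror-state') != 'snapmirrored':
            # Try to repair a broken-off or uninitialized relationship.
            try:
                dest_client.resume_snapmirror(src_vserver, src_flexvol,
                                              dest_vserver, dest_flexvol)
                dest_client.resync_snapmirror(src_vserver, src_flexvol,
                                              dest_vserver, dest_flexvol)
            except netapp_api.NaApiError:
                LOG.exception("Could not re-enable SnapMirror.")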
    @ddt.data('resume_snapmirror', 'resync_snapmirror')
    def test_create_snapmirror_snapmirror_exists_repair_exception(self,
                                                                  failed_call):
        mock_dest_client = mock.Mock()
        mock_exception_log = self.mock_object(data_motion.LOG, 'exception')
        existing_snapmirrors = [{'mirror-state': 'broken-off'}]
        self.mock_object(self.dm_mixin, 'create_destination_flexvol')
        self.mock_object(utils, 'get_client_for_backend',
                         mock.Mock(return_value=mock_dest_client))
        self.mock_object(mock_dest_client, 'flexvol_exists',
                         mock.Mock(return_value=True))
        self.mock_object(mock_dest_client, 'get_snapmirrors',
                         mock.Mock(return_value=existing_snapmirrors))
        self.mock_object(mock_dest_client, failed_call,
                         mock.Mock(side_effect=netapp_api.NaApiError))

        self.dm_mixin.create_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)

        self.assertFalse(mock_dest_client.create_snapmirror.called)
        self.assertFalse(mock_dest_client.initialize_snapmirror.called)
        self.assertFalse(self.dm_mixin.create_destination_flexvol.called)
        mock_dest_client.resume_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name,
            self.dest_vserver, self.dest_flexvol_name)
        mock_dest_client.resume_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name,
            self.dest_vserver, self.dest_flexvol_name)
        self.assertEqual(1, mock_exception_log.call_count)

    def test_delete_snapmirror(self):
        mock_src_client = mock.Mock()
        mock_dest_client = mock.Mock()
        self.mock_object(utils, 'get_client_for_backend',
                         mock.Mock(side_effect=[mock_dest_client,
                                                mock_src_client]))

        self.dm_mixin.delete_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)

        mock_dest_client.abort_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name, clear_checkpoint=False)
        mock_dest_client.delete_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name)
        mock_src_client.release_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name)
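delete_snapmirror, as exercised here and in the next five tests, is
deliberately tolerant: abort and delete errors for an absent
relationship are swallowed, and the source-side release is best-effort.
A minimal sketch (hypothetical; vserver lookups elided):

    def delete_snapmirror(self, src_backend, dest_backend,
                          src_flexvol, dest_flexvol, release=True):
        dest_client = utils.get_client_for_backend(dest_backend)
        try:
            dest_client.abort_snapmirror(src_vserver, src_flexvol,
                                         dest_vserver, dest_flexvol,
                                         clear_checkpoint=False)
        except netapp_api.NaApiError:
            pass  # No transfer in progress, or no such relationship.
        try:
            dest_client.delete_snapmirror(src_vserver, src_flexvol,
                                          dest_vserver, dest_flexvol)
        except netapp_api.NaApiError:
            pass
        if release:
            try:
                src_client = utils.get_client_for_backend(src_backend)
                src_client.release_snapmirror(src_vserver, src_flexvol,
                                              dest_vserver, dest_flexvol)
            except Exception:
                pass  # Source may be unreachable after a disaster.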
def test_delete_snapmirror_does_not_exist(self):
|
||||
"""Ensure delete succeeds when the snapmirror does not exist."""
|
||||
mock_src_client = mock.Mock()
|
||||
mock_dest_client = mock.Mock()
|
||||
mock_dest_client.abort_snapmirror.side_effect = netapp_api.NaApiError(
|
||||
code=netapp_api.EAPIERROR)
|
||||
self.mock_object(utils, 'get_client_for_backend',
|
||||
mock.Mock(side_effect=[mock_dest_client,
|
||||
mock_src_client]))
|
||||
|
||||
self.dm_mixin.delete_snapmirror(self.src_backend,
|
||||
self.dest_backend,
|
||||
self.src_flexvol_name,
|
||||
self.dest_flexvol_name)
|
||||
|
||||
mock_dest_client.abort_snapmirror.assert_called_once_with(
|
||||
self.src_vserver, self.src_flexvol_name, self.dest_vserver,
|
||||
self.dest_flexvol_name, clear_checkpoint=False)
|
||||
mock_dest_client.delete_snapmirror.assert_called_once_with(
|
||||
self.src_vserver, self.src_flexvol_name, self.dest_vserver,
|
||||
self.dest_flexvol_name)
|
||||
mock_src_client.release_snapmirror.assert_called_once_with(
|
||||
self.src_vserver, self.src_flexvol_name, self.dest_vserver,
|
||||
self.dest_flexvol_name)
|
||||
|
||||
    def test_delete_snapmirror_error_deleting(self):
        """Ensure delete succeeds even if destroying the snapmirror fails."""
        mock_src_client = mock.Mock()
        mock_dest_client = mock.Mock()
        mock_dest_client.delete_snapmirror.side_effect = netapp_api.NaApiError(
            code=netapp_api.ESOURCE_IS_DIFFERENT
        )
        self.mock_object(utils, 'get_client_for_backend',
                         mock.Mock(side_effect=[mock_dest_client,
                                                mock_src_client]))

        self.dm_mixin.delete_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)

        mock_dest_client.abort_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name, clear_checkpoint=False)
        mock_dest_client.delete_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name)
        mock_src_client.release_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name)

    def test_delete_snapmirror_error_releasing(self):
        """Ensure delete succeeds even if releasing the snapmirror fails."""
        mock_src_client = mock.Mock()
        mock_dest_client = mock.Mock()
        mock_src_client.release_snapmirror.side_effect = (
            netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND))
        self.mock_object(utils, 'get_client_for_backend',
                         mock.Mock(side_effect=[mock_dest_client,
                                                mock_src_client]))

        self.dm_mixin.delete_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)

        mock_dest_client.abort_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name, clear_checkpoint=False)
        mock_dest_client.delete_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name)
        mock_src_client.release_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name)

    def test_delete_snapmirror_without_release(self):
        mock_src_client = mock.Mock()
        mock_dest_client = mock.Mock()
        self.mock_object(utils, 'get_client_for_backend',
                         mock.Mock(side_effect=[mock_dest_client,
                                                mock_src_client]))

        self.dm_mixin.delete_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name,
                                        release=False)

        mock_dest_client.abort_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name, clear_checkpoint=False)
        mock_dest_client.delete_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name)
        self.assertFalse(mock_src_client.release_snapmirror.called)

    def test_delete_snapmirror_source_unreachable(self):
        mock_src_client = mock.Mock()
        mock_dest_client = mock.Mock()
        self.mock_object(utils, 'get_client_for_backend',
                         mock.Mock(side_effect=[mock_dest_client,
                                                Exception]))

        self.dm_mixin.delete_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)

        mock_dest_client.abort_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name, clear_checkpoint=False)
        mock_dest_client.delete_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name)

        self.assertFalse(mock_src_client.release_snapmirror.called)

    def test_quiesce_then_abort_timeout(self):
        self.mock_object(time, 'sleep')
        mock_get_snapmirrors = mock.Mock(
            return_value=[{'relationship-status': 'transferring'}])
        self.mock_object(self.mock_dest_client, 'get_snapmirrors',
                         mock_get_snapmirrors)

        self.dm_mixin.quiesce_then_abort(self.src_backend,
                                         self.dest_backend,
                                         self.src_flexvol_name,
                                         self.dest_flexvol_name)

        self.mock_dest_client.get_snapmirrors.assert_called_with(
            self.src_vserver, self.src_flexvol_name,
            self.dest_vserver, self.dest_flexvol_name,
            desired_attributes=['relationship-status', 'mirror-state'])
        self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count)
        self.mock_dest_client.quiesce_snapmirror.assert_called_with(
            self.src_vserver, self.src_flexvol_name,
            self.dest_vserver, self.dest_flexvol_name)
        self.mock_dest_client.abort_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name, clear_checkpoint=False)

    def test_update_snapmirror(self):
        self.mock_object(self.mock_dest_client, 'get_snapmirrors')

        self.dm_mixin.update_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)

        self.mock_dest_client.update_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name,
            self.dest_vserver, self.dest_flexvol_name)

    def test_quiesce_then_abort_wait_for_quiesced(self):
        self.mock_object(time, 'sleep')
        self.mock_object(self.mock_dest_client, 'get_snapmirrors',
                         mock.Mock(side_effect=[
                             [{'relationship-status': 'transferring'}],
                             [{'relationship-status': 'quiesced'}]]))

        self.dm_mixin.quiesce_then_abort(self.src_backend,
                                         self.dest_backend,
                                         self.src_flexvol_name,
                                         self.dest_flexvol_name)

        self.mock_dest_client.get_snapmirrors.assert_called_with(
            self.src_vserver, self.src_flexvol_name,
            self.dest_vserver, self.dest_flexvol_name,
            desired_attributes=['relationship-status', 'mirror-state'])
        self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count)
        self.mock_dest_client.quiesce_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name,
            self.dest_vserver, self.dest_flexvol_name)

    def test_break_snapmirror(self):
        self.mock_object(self.dm_mixin, 'quiesce_then_abort')

        self.dm_mixin.break_snapmirror(self.src_backend,
                                       self.dest_backend,
                                       self.src_flexvol_name,
                                       self.dest_flexvol_name)

        self.dm_mixin.quiesce_then_abort.assert_called_once_with(
            self.src_backend, self.dest_backend,
            self.src_flexvol_name, self.dest_flexvol_name)
        self.mock_dest_client.break_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name,
            self.dest_vserver, self.dest_flexvol_name)
        self.mock_dest_client.mount_flexvol.assert_called_once_with(
            self.dest_flexvol_name)

    def test_break_snapmirror_wait_for_quiesced(self):
        self.mock_object(self.dm_mixin, 'quiesce_then_abort')

        self.dm_mixin.break_snapmirror(self.src_backend,
                                       self.dest_backend,
                                       self.src_flexvol_name,
                                       self.dest_flexvol_name)

        self.dm_mixin.quiesce_then_abort.assert_called_once_with(
            self.src_backend, self.dest_backend,
            self.src_flexvol_name, self.dest_flexvol_name)
        self.mock_dest_client.break_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name,
            self.dest_vserver, self.dest_flexvol_name)
        self.mock_dest_client.mount_flexvol.assert_called_once_with(
            self.dest_flexvol_name)

    def test_resync_snapmirror(self):
        self.dm_mixin.resync_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)

        self.mock_dest_client.resync_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name,
            self.dest_vserver, self.dest_flexvol_name)

    def test_resume_snapmirror(self):
        self.dm_mixin.resume_snapmirror(self.src_backend,
                                        self.dest_backend,
                                        self.src_flexvol_name,
                                        self.dest_flexvol_name)

        self.mock_dest_client.resume_snapmirror.assert_called_once_with(
            self.src_vserver, self.src_flexvol_name, self.dest_vserver,
            self.dest_flexvol_name)

    @ddt.data({'size': 1, 'aggr_map': {}},
              {'size': 1, 'aggr_map': {'aggr02': 'aggr20'}},
              {'size': None, 'aggr_map': {'aggr01': 'aggr10'}})
    @ddt.unpack
    def test_create_destination_flexvol_exception(self, size, aggr_map):
        self.mock_object(
            self.mock_src_client, 'get_provisioning_options_from_flexvol',
            mock.Mock(return_value={'size': size, 'aggregate': 'aggr01'}))
        self.mock_object(self.dm_mixin, '_get_replication_aggregate_map',
                         mock.Mock(return_value=aggr_map))
        mock_client_call = self.mock_object(
            self.mock_dest_client, 'create_flexvol')

        self.assertRaises(exception.NetAppDriverException,
                          self.dm_mixin.create_destination_flexvol,
                          self.src_backend, self.dest_backend,
                          self.src_flexvol_name, self.dest_flexvol_name)
        if size:
            self.dm_mixin._get_replication_aggregate_map.\
                assert_called_once_with(self.src_backend, self.dest_backend)
        else:
            self.assertFalse(
                self.dm_mixin._get_replication_aggregate_map.called)
        self.assertFalse(mock_client_call.called)

    def test_create_destination_flexvol(self):
        aggr_map = {
            fakes.PROVISIONING_OPTS['aggregate']: 'aggr01',
            'aggr20': 'aggr02',
        }
        provisioning_opts = copy.deepcopy(fakes.PROVISIONING_OPTS)
        expected_prov_opts = copy.deepcopy(fakes.PROVISIONING_OPTS)
        expected_prov_opts.pop('volume_type', None)
        expected_prov_opts.pop('size', None)
        expected_prov_opts.pop('aggregate', None)
        mock_get_provisioning_opts_call = self.mock_object(
            self.mock_src_client, 'get_provisioning_options_from_flexvol',
            mock.Mock(return_value=provisioning_opts))
        self.mock_object(self.dm_mixin, '_get_replication_aggregate_map',
                         mock.Mock(return_value=aggr_map))
        mock_client_call = self.mock_object(
            self.mock_dest_client, 'create_flexvol')

        retval = self.dm_mixin.create_destination_flexvol(
            self.src_backend, self.dest_backend,
            self.src_flexvol_name, self.dest_flexvol_name)

        self.assertIsNone(retval)
        mock_get_provisioning_opts_call.assert_called_once_with(
            self.src_flexvol_name)
        self.dm_mixin._get_replication_aggregate_map.assert_called_once_with(
            self.src_backend, self.dest_backend)
        mock_client_call.assert_called_once_with(
            self.dest_flexvol_name, 'aggr01', fakes.PROVISIONING_OPTS['size'],
            volume_type='dp', **expected_prov_opts)

    def test_ensure_snapmirrors(self):
        flexvols = ['nvol1', 'nvol2']
        replication_backends = ['fallback1', 'fallback2']
        self.mock_object(self.dm_mixin, 'get_replication_backend_names',
                         mock.Mock(return_value=replication_backends))
        self.mock_object(self.dm_mixin, 'create_snapmirror')
        expected_calls = [
            mock.call(self.src_backend, replication_backends[0],
                      flexvols[0], flexvols[0]),
            mock.call(self.src_backend, replication_backends[0],
                      flexvols[1], flexvols[1]),
            mock.call(self.src_backend, replication_backends[1],
                      flexvols[0], flexvols[0]),
            mock.call(self.src_backend, replication_backends[1],
                      flexvols[1], flexvols[1]),
        ]

        retval = self.dm_mixin.ensure_snapmirrors(self.mock_src_config,
                                                  self.src_backend,
                                                  flexvols)

        self.assertIsNone(retval)
        self.dm_mixin.get_replication_backend_names.assert_called_once_with(
            self.mock_src_config)
        self.dm_mixin.create_snapmirror.assert_has_calls(expected_calls)

    def test_break_snapmirrors(self):
        flexvols = ['nvol1', 'nvol2']
        replication_backends = ['fallback1', 'fallback2']
        side_effects = [None, netapp_api.NaApiError, None, None]
        self.mock_object(self.dm_mixin, 'get_replication_backend_names',
                         mock.Mock(return_value=replication_backends))
        self.mock_object(self.dm_mixin, 'break_snapmirror',
                         mock.Mock(side_effect=side_effects))
        mock_exc_log = self.mock_object(data_motion.LOG, 'exception')
        expected_calls = [
            mock.call(self.src_backend, replication_backends[0],
                      flexvols[0], flexvols[0]),
            mock.call(self.src_backend, replication_backends[0],
                      flexvols[1], flexvols[1]),
            mock.call(self.src_backend, replication_backends[1],
                      flexvols[0], flexvols[0]),
            mock.call(self.src_backend, replication_backends[1],
                      flexvols[1], flexvols[1]),
        ]

        failed_to_break = self.dm_mixin.break_snapmirrors(
            self.mock_src_config, self.src_backend, flexvols, 'fallback1')

        self.assertEqual(1, len(failed_to_break))
        self.assertEqual(1, mock_exc_log.call_count)
        self.dm_mixin.get_replication_backend_names.assert_called_once_with(
            self.mock_src_config)
        self.dm_mixin.break_snapmirror.assert_has_calls(expected_calls)

    def test_update_snapmirrors(self):
        flexvols = ['nvol1', 'nvol2']
        replication_backends = ['fallback1', 'fallback2']
        self.mock_object(self.dm_mixin, 'get_replication_backend_names',
                         mock.Mock(return_value=replication_backends))
        side_effects = [None, netapp_api.NaApiError, None, None]
        self.mock_object(self.dm_mixin, 'update_snapmirror',
                         mock.Mock(side_effect=side_effects))
        expected_calls = [
            mock.call(self.src_backend, replication_backends[0],
                      flexvols[0], flexvols[0]),
            mock.call(self.src_backend, replication_backends[0],
                      flexvols[1], flexvols[1]),
            mock.call(self.src_backend, replication_backends[1],
                      flexvols[0], flexvols[0]),
            mock.call(self.src_backend, replication_backends[1],
                      flexvols[1], flexvols[1]),
        ]

        retval = self.dm_mixin.update_snapmirrors(self.mock_src_config,
                                                  self.src_backend,
                                                  flexvols)

        self.assertIsNone(retval)
        self.dm_mixin.get_replication_backend_names.assert_called_once_with(
            self.mock_src_config)
        self.dm_mixin.update_snapmirror.assert_has_calls(expected_calls)

    @ddt.data([{'destination-volume': 'nvol3', 'lag-time': '3223'},
               {'destination-volume': 'nvol5', 'lag-time': '32'}],
              [])
    def test__choose_failover_target_no_failover_targets(self, snapmirrors):
        flexvols = ['nvol1', 'nvol2']
        replication_backends = ['fallback1', 'fallback2']
        mock_debug_log = self.mock_object(data_motion.LOG, 'debug')
        self.mock_object(self.dm_mixin, 'get_snapmirrors',
                         mock.Mock(return_value=snapmirrors))

        target = self.dm_mixin._choose_failover_target(
            self.src_backend, flexvols, replication_backends)

        self.assertIsNone(target)
        self.assertEqual(2, mock_debug_log.call_count)

    def test__choose_failover_target(self):
        flexvols = ['nvol1', 'nvol2']
        replication_backends = ['fallback1', 'fallback2']
        target_1_snapmirrors = [
            {'destination-volume': 'nvol3', 'lag-time': '12'},
            {'destination-volume': 'nvol1', 'lag-time': '1541'},
            {'destination-volume': 'nvol2', 'lag-time': '16'},
        ]
        target_2_snapmirrors = [
            {'destination-volume': 'nvol2', 'lag-time': '717'},
            {'destination-volume': 'nvol1', 'lag-time': '323'},
            {'destination-volume': 'nvol3', 'lag-time': '720'},
        ]
        mock_debug_log = self.mock_object(data_motion.LOG, 'debug')
        self.mock_object(self.dm_mixin, 'get_snapmirrors',
                         mock.Mock(side_effect=[target_1_snapmirrors,
                                                target_2_snapmirrors]))

        target = self.dm_mixin._choose_failover_target(
            self.src_backend, flexvols, replication_backends)

        self.assertEqual('fallback2', target)
        self.assertFalse(mock_debug_log.called)

    def test__failover_host_no_suitable_target(self):
        flexvols = ['nvol1', 'nvol2']
        replication_backends = ['fallback1', 'fallback2']
        self.mock_object(self.dm_mixin, '_choose_failover_target',
                         mock.Mock(return_value=None))
        self.mock_object(utils, 'get_backend_configuration')
        self.mock_object(self.dm_mixin, 'update_snapmirrors')
        self.mock_object(self.dm_mixin, 'break_snapmirrors')

        self.assertRaises(exception.NetAppDriverException,
                          self.dm_mixin._complete_failover,
                          self.src_backend, replication_backends, flexvols,
                          [], failover_target=None)
        self.assertFalse(utils.get_backend_configuration.called)
        self.assertFalse(self.dm_mixin.update_snapmirrors.called)
        self.assertFalse(self.dm_mixin.break_snapmirrors.called)

    @ddt.data('fallback1', None)
    def test__failover_host(self, failover_target):
        flexvols = ['nvol1', 'nvol2', 'nvol3']
        replication_backends = ['fallback1', 'fallback2']
        volumes = [
            {'id': 'xyzzy', 'host': 'openstack@backend1#nvol1'},
            {'id': 'foobar', 'host': 'openstack@backend1#nvol2'},
            {'id': 'waldofred', 'host': 'openstack@backend1#nvol3'},
        ]
        expected_volume_updates = [
            {
                'volume_id': 'xyzzy',
                'updates': {'replication_status': 'failed-over'},
            },
            {
                'volume_id': 'foobar',
                'updates': {'replication_status': 'failed-over'},
            },
            {
                'volume_id': 'waldofred',
                'updates': {'replication_status': 'error'},
            },
        ]
        expected_active_backend_name = failover_target or 'fallback2'
        self.mock_object(self.dm_mixin, '_choose_failover_target',
                         mock.Mock(return_value='fallback2'))
        self.mock_object(utils, 'get_backend_configuration')
        self.mock_object(self.dm_mixin, 'update_snapmirrors')
        self.mock_object(self.dm_mixin, 'break_snapmirrors',
                         mock.Mock(return_value=['nvol3']))

        actual_active_backend_name, actual_volume_updates = (
            self.dm_mixin._complete_failover(
                self.src_backend, replication_backends, flexvols,
                volumes, failover_target=failover_target)
        )

        self.assertEqual(expected_active_backend_name,
                         actual_active_backend_name)
        self.assertEqual(expected_volume_updates, actual_volume_updates)
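
The two scenarios above pin down the failover contract: the chosen target is the backend whose SnapMirror copies of the affected flexvols are freshest, and every volume is reported back with a replication_status update. As an editorial sketch only, mirroring the fixtures above under the assumption they imply (lowest aggregate lag-time wins):

flexvols = ['nvol1', 'nvol2']
lag_by_target = {
    'fallback1': {'nvol1': 1541, 'nvol2': 16},   # total lag 1557
    'fallback2': {'nvol1': 323, 'nvol2': 717},   # total lag 1040
}

def choose_target(lag_by_target, flexvols):
    # Pick the replication target whose mirrors are freshest overall.
    return min(lag_by_target,
               key=lambda t: sum(lag_by_target[t][v] for v in flexvols))

assert choose_target(lag_by_target, flexvols) == 'fallback2'
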
@ -0,0 +1,103 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import ddt
import mock
from oslo_config import cfg

from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap.utils import utils

CONF = cfg.CONF


@ddt.ddt
class NetAppCDOTDataMotionTestCase(test.TestCase):

    def setUp(self):
        super(NetAppCDOTDataMotionTestCase, self).setUp()
        self.backend = 'backend1'
        self.mock_cmode_client = self.mock_object(client_cmode, 'Client')
        self.config = fakes.get_fake_cmode_config(self.backend)
        CONF.set_override('volume_backend_name', self.backend,
                          group=self.backend, enforce_type=True)
        CONF.set_override('netapp_transport_type', 'https',
                          group=self.backend, enforce_type=True)
        CONF.set_override('netapp_login', 'fake_user',
                          group=self.backend, enforce_type=True)
        CONF.set_override('netapp_password', 'fake_password',
                          group=self.backend, enforce_type=True)
        CONF.set_override('netapp_server_hostname', 'fake_hostname',
                          group=self.backend, enforce_type=True)
        CONF.set_override('netapp_server_port', 8866,
                          group=self.backend, enforce_type=True)

    def test_get_backend_configuration(self):
        self.mock_object(utils, 'CONF')
        CONF.set_override('netapp_vserver', 'fake_vserver',
                          group=self.backend, enforce_type=True)
        utils.CONF.list_all_sections.return_value = [self.backend]

        config = utils.get_backend_configuration(self.backend)

        self.assertEqual('fake_vserver', config.netapp_vserver)

    def test_get_backend_configuration_different_backend_name(self):
        self.mock_object(utils, 'CONF')
        CONF.set_override('netapp_vserver', 'fake_vserver',
                          group=self.backend, enforce_type=True)
        CONF.set_override('volume_backend_name', 'fake_backend_name',
                          group=self.backend, enforce_type=True)
        utils.CONF.list_all_sections.return_value = [self.backend]

        config = utils.get_backend_configuration(self.backend)

        self.assertEqual('fake_vserver', config.netapp_vserver)
        self.assertEqual('fake_backend_name', config.volume_backend_name)

    @ddt.data([], ['fake_backend1', 'fake_backend2'])
    def test_get_backend_configuration_not_configured(self, conf_sections):
        self.mock_object(utils, 'CONF')
        utils.CONF.list_all_sections.return_value = conf_sections

        self.assertRaises(exception.ConfigNotFound,
                          utils.get_backend_configuration,
                          self.backend)

    def test_get_client_for_backend(self):
        self.mock_object(utils, 'get_backend_configuration',
                         mock.Mock(return_value=self.config))

        utils.get_client_for_backend(self.backend)

        self.mock_cmode_client.assert_called_once_with(
            hostname='fake_hostname', password='fake_password',
            username='fake_user', transport_type='https', port=8866,
            trace=mock.ANY, vserver=None)

    def test_get_client_for_backend_with_vserver(self):
        self.mock_object(utils, 'get_backend_configuration',
                         mock.Mock(return_value=self.config))

        CONF.set_override('netapp_vserver', 'fake_vserver',
                          group=self.backend, enforce_type=True)

        utils.get_client_for_backend(self.backend)

        self.mock_cmode_client.assert_called_once_with(
            hostname='fake_hostname', password='fake_password',
            username='fake_user', transport_type='https', port=8866,
            trace=mock.ANY, vserver='fake_vserver')
@ -85,8 +85,11 @@ class NetAppDriverFactoryTestCase(test.TestCase):
        def get_full_class_name(obj):
            return obj.__module__ + '.' + obj.__class__.__name__

        kwargs = {'configuration': na_fakes.create_configuration(),
                  'app_version': 'fake_info'}
        kwargs = {
            'configuration': na_fakes.create_configuration(),
            'app_version': 'fake_info',
            'host': 'fakehost@fakebackend',
        }

        registry = na_common.NETAPP_UNIFIED_DRIVER_REGISTRY

@ -98,8 +101,11 @@ class NetAppDriverFactoryTestCase(test.TestCase):

    def test_create_driver_case_insensitive(self):

        kwargs = {'configuration': na_fakes.create_configuration(),
                  'app_version': 'fake_info'}
        kwargs = {
            'configuration': na_fakes.create_configuration(),
            'app_version': 'fake_info',
            'host': 'fakehost@fakebackend',
        }

        driver = na_common.NetAppDriver.create_driver('ONTAP_CLUSTER', 'FC',
                                                      **kwargs)
@ -108,8 +114,11 @@ class NetAppDriverFactoryTestCase(test.TestCase):

    def test_create_driver_invalid_family(self):

        kwargs = {'configuration': na_fakes.create_configuration(),
                  'app_version': 'fake_info'}
        kwargs = {
            'configuration': na_fakes.create_configuration(),
            'app_version': 'fake_info',
            'host': 'fakehost@fakebackend',
        }

        self.assertRaises(exception.InvalidInput,
                          na_common.NetAppDriver.create_driver,
@ -117,8 +126,11 @@ class NetAppDriverFactoryTestCase(test.TestCase):

    def test_create_driver_invalid_protocol(self):

        kwargs = {'configuration': na_fakes.create_configuration(),
                  'app_version': 'fake_info'}
        kwargs = {
            'configuration': na_fakes.create_configuration(),
            'app_version': 'fake_info',
            'host': 'fakehost@fakebackend',
        }

        self.assertRaises(exception.InvalidInput,
                          na_common.NetAppDriver.create_driver,

@ -32,6 +32,7 @@ import uuid

from oslo_log import log as logging
from oslo_log import versionutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
import six
@ -46,6 +47,7 @@ from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils

LOG = logging.getLogger(__name__)
HOUSEKEEPING_INTERVAL_SECONDS = 600  # ten minutes


class NetAppLun(object):
@ -103,6 +105,8 @@ class NetAppBlockStorageLibrary(object):
        self.lun_space_reservation = 'true'
        self.lookup_service = fczm_utils.create_lookup_service()
        self.app_version = kwargs.get("app_version", "unknown")
        self.host = kwargs.get('host')
        self.backend_name = self.host.split('@')[1]

        self.configuration = kwargs['configuration']
        self.configuration.append_config_values(na_opts.netapp_connection_opts)
@ -167,6 +171,21 @@ class NetAppBlockStorageLibrary(object):
        self._extract_and_populate_luns(lun_list)
        LOG.debug("Success getting list of LUNs from server.")

        self._start_periodic_tasks()

    def _start_periodic_tasks(self):
        """Start recurring tasks common to all Data ONTAP block drivers."""

        # Start the task that runs other housekeeping tasks, such as deletion
        # of previously soft-deleted storage artifacts.
        housekeeping_periodic_task = loopingcall.FixedIntervalLoopingCall(
            self._handle_housekeeping_tasks)
        housekeeping_periodic_task.start(
            interval=HOUSEKEEPING_INTERVAL_SECONDS, initial_delay=0)
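
For readers unfamiliar with oslo.service, FixedIntervalLoopingCall re-invokes the given callable on a fixed period in a green thread, and initial_delay=0 makes the first run happen immediately. A minimal standalone sketch (the task body here is hypothetical):

from oslo_service import loopingcall

def _housekeeping():
    # Hypothetical task body; the driver's real task does cleanup work.
    print('housekeeping pass')

task = loopingcall.FixedIntervalLoopingCall(_housekeeping)
# Run once right away, then every 600 seconds, as the driver configures it.
task.start(interval=600, initial_delay=0)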

    def _handle_housekeeping_tasks(self):
        """Handle various cleanup activities."""

    def get_pool(self, volume):
        """Return pool name where volume resides.

@ -33,20 +33,21 @@ from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import utils as cmode_utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils


LOG = logging.getLogger(__name__)
QOS_CLEANUP_INTERVAL_SECONDS = 60
SSC_UPDATE_INTERVAL_SECONDS = 3600  # hourly


@six.add_metaclass(utils.TraceWrapperMetaclass)
class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
                                     data_motion.DataMotionMixin):
    """NetApp block storage library for Data ONTAP (Cluster-mode)."""

    REQUIRED_CMODE_FLAGS = ['netapp_vserver']
@ -57,27 +58,42 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
                                                             **kwargs)
        self.configuration.append_config_values(na_opts.netapp_cluster_opts)
        self.driver_mode = 'cluster'
        self.failed_over_backend_name = kwargs.get('active_backend_id')
        self.failed_over = self.failed_over_backend_name is not None
        self.replication_enabled = (
            True if self.get_replication_backend_names(
                self.configuration) else False)

    def do_setup(self, context):
        super(NetAppBlockStorageCmodeLibrary, self).do_setup(context)
        na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)

        self.vserver = self.configuration.netapp_vserver

        self.zapi_client = client_cmode.Client(
            transport_type=self.configuration.netapp_transport_type,
            username=self.configuration.netapp_login,
            password=self.configuration.netapp_password,
            hostname=self.configuration.netapp_server_hostname,
            port=self.configuration.netapp_server_port,
            vserver=self.vserver)
        # cDOT API client
        self.zapi_client = cmode_utils.get_client_for_backend(
            self.failed_over_backend_name or self.backend_name)
        self.vserver = self.zapi_client.vserver

        # Performance monitoring library
        self.perf_library = perf_cmode.PerformanceCmodeLibrary(
            self.zapi_client)

        # Storage service catalog
        self.ssc_library = capabilities.CapabilitiesLibrary(
            self.driver_protocol, self.vserver, self.zapi_client,
            self.configuration)

    def _update_zapi_client(self, backend_name):
        """Set cDOT API client for the specified config backend stanza name."""

        self.zapi_client = cmode_utils.get_client_for_backend(backend_name)
        self.vserver = self.zapi_client.vserver
        self.ssc_library._update_for_failover(self.zapi_client,
                                              self._get_flexvol_to_pool_map())
        ssc = self.ssc_library.get_ssc()
        self.perf_library._update_for_failover(self.zapi_client, ssc)
        # Clear LUN table cache
        self.lun_table = {}

    def check_for_setup_error(self):
        """Check that the driver is working and can communicate."""
        self.ssc_library.check_api_permissions()
@ -89,9 +105,9 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
            raise exception.NetAppDriverException(msg)

        super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error()
        self._start_periodic_tasks()

    def _start_periodic_tasks(self):
        """Start recurring tasks for NetApp cDOT block drivers."""

        # Note(cknight): Run the task once in the current thread to prevent a
        # race with the first invocation of _update_volume_stats.
@ -104,12 +120,32 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
            interval=SSC_UPDATE_INTERVAL_SECONDS,
            initial_delay=SSC_UPDATE_INTERVAL_SECONDS)

        # Start the task that harvests soft-deleted QoS policy groups.
        harvest_qos_periodic_task = loopingcall.FixedIntervalLoopingCall(
            self.zapi_client.remove_unused_qos_policy_groups)
        harvest_qos_periodic_task.start(
            interval=QOS_CLEANUP_INTERVAL_SECONDS,
            initial_delay=QOS_CLEANUP_INTERVAL_SECONDS)
        super(NetAppBlockStorageCmodeLibrary, self)._start_periodic_tasks()

    def _handle_housekeeping_tasks(self):
        """Handle various cleanup activities."""
        (super(NetAppBlockStorageCmodeLibrary, self).
         _handle_housekeeping_tasks())

        # Harvest soft-deleted QoS policy groups
        self.zapi_client.remove_unused_qos_policy_groups()

        active_backend = self.failed_over_backend_name or self.backend_name

        LOG.debug("Current service state: Replication enabled: %("
                  "replication)s. Failed-Over: %(failed)s. Active Backend "
                  "ID: %(active)s",
                  {
                      'replication': self.replication_enabled,
                      'failed': self.failed_over,
                      'active': active_backend,
                  })

        # Create pool mirrors if whole-backend replication configured
        if self.replication_enabled and not self.failed_over:
            self.ensure_snapmirrors(
                self.configuration, self.backend_name,
                self.ssc_library.get_ssc_flexvol_names())

    def _create_lun(self, volume_name, lun_name, size,
                    metadata, qos_policy_group_name=None):
@ -118,8 +154,9 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
        self.zapi_client.create_lun(
            volume_name, lun_name, size, metadata, qos_policy_group_name)

    def _create_lun_handle(self, metadata):
    def _create_lun_handle(self, metadata, vserver=None):
        """Returns LUN handle based on filer type."""
        vserver = vserver or self.vserver
        return '%s:%s' % (vserver, metadata['Path'])

    def _find_mapped_lun_igroup(self, path, initiator_list):
@ -186,7 +223,7 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):

    def _update_volume_stats(self, filter_function=None,
                             goodness_function=None):
        """Retrieve stats info from vserver."""
        """Retrieve backend stats."""

        LOG.debug('Updating volume stats')
        data = {}
@ -199,6 +236,7 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
            filter_function=filter_function,
            goodness_function=goodness_function)
        data['sparse_copy_volume'] = True
        data.update(self.get_replication_backend_stats(self.configuration))

        self.zapi_client.provide_ems(self, self.driver_name, self.app_version)
        self._stats = data
@ -368,3 +406,8 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary):
            qos_policy_group_info = None
        self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
        super(NetAppBlockStorageCmodeLibrary, self).unmanage(volume)

    def failover_host(self, context, volumes, secondary_id=None):
        """Failover a backend to a secondary replication target."""

        return self._failover_host(volumes, secondary_id=secondary_id)
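
A sketch of the call path, hedged since the manager side is outside this change: Cinder's volume manager invokes failover_host with the affected volumes and an optional secondary backend id, and the mixin's _failover_host is expected to hand back the new active backend plus per-volume model updates shaped like those asserted in the unit tests.

# Hypothetical call site; 'driver' is an initialized cmode driver instance.
active_backend_id, volume_updates = driver.failover_host(
    context, volumes, secondary_id='fallback1')
# Each entry: {'volume_id': <id>, 'updates': {'replication_status': ...}}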

@ -37,10 +37,18 @@ from cinder import utils

LOG = logging.getLogger(__name__)

EAPIERROR = '13001'
EAPIPRIVILEGE = '13003'
EAPINOTFOUND = '13005'
ESIS_CLONE_NOT_LICENSED = '14956'
ESNAPSHOTNOTALLOWED = '13023'
ESIS_CLONE_NOT_LICENSED = '14956'
EOBJECTNOTFOUND = '15661'
ESOURCE_IS_DIFFERENT = '17105'
ERELATION_EXISTS = '17122'
ERELATION_NOT_QUIESCED = '17127'
ENOTRANSFER_IN_PROGRESS = '17130'
EANOTHER_OP_ACTIVE = '17131'
ETRANSFER_IN_PROGRESS = '17137'
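
These ZAPI error codes let callers treat specific failures as benign: the SnapMirror client methods further below each swallow exactly one expected code and re-raise everything else. The pattern in miniature (a sketch, assuming a configured client and prepared api_args):

try:
    client.send_request('snapmirror-resume', api_args)
except netapp_api.NaApiError as e:
    if e.code != netapp_api.ERELATION_NOT_QUIESCED:
        raise  # unexpected failure: propagate
    # The relationship was not quiesced, so there is nothing to resume.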


class NaServer(object):

@ -73,6 +73,11 @@ class Client(object):
        minor = res.get_child_content('minor-version')
        return major, minor

    def _strip_xml_namespace(self, string):
        if string.startswith('{') and '}' in string:
            return string.split('}', 1)[1]
        return string
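
ZAPI responses may qualify element names with an XML namespace in James Clark notation, i.e. '{uri}localname'; this helper trims the braced prefix so callers can match on bare names. For example:

tag = '{http://www.netapp.com/filer/admin}destination-volume'
if tag.startswith('{') and '}' in tag:
    tag = tag.split('}', 1)[1]
assert tag == 'destination-volume'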

    def check_is_naelement(self, elem):
        """Checks if object is instance of NaElement."""
        if not isinstance(elem, netapp_api.NaElement):

@ -20,6 +20,7 @@ import math
import re

from oslo_log import log as logging
from oslo_utils import units
import six

from cinder import exception
@ -61,6 +62,7 @@ class Client(client_base.Client):
        ontapi_1_30 = ontapi_version >= (1, 30)
        ontapi_1_100 = ontapi_version >= (1, 100)

        self.features.add_feature('SNAPMIRROR_V2', supported=ontapi_1_20)
        self.features.add_feature('USER_CAPABILITY_LIST',
                                  supported=ontapi_1_20)
        self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_2x)
@ -70,6 +72,7 @@ class Client(client_base.Client):
        self.features.add_feature('ADVANCED_DISK_PARTITIONING',
                                  supported=ontapi_1_30)
        self.features.add_feature('BACKUP_CLONE_PARAM', supported=ontapi_1_100)
        self.features.add_feature('CLUSTER_PEER_POLICY', supported=ontapi_1_30)

    def _invoke_vserver_api(self, na_element, vserver):
        server = copy.copy(self.connection)
@ -890,6 +893,7 @@ class Client(client_base.Client):
                    'owning-vserver-name': None,
                    'junction-path': None,
                    'containing-aggregate-name': None,
                    'type': None,
                },
                'volume-mirror-attributes': {
                    'is-data-protection-mirror': None,
@ -898,10 +902,18 @@ class Client(client_base.Client):
                'volume-space-attributes': {
                    'is-space-guarantee-enabled': None,
                    'space-guarantee': None,
                    'percentage-snapshot-reserve': None,
                    'size': None,
                },
                'volume-qos-attributes': {
                    'policy-group-name': None,
                }
                },
                'volume-snapshot-attributes': {
                    'snapshot-policy': None,
                },
                'volume-language-attributes': {
                    'language-code': None,
                },
            },
        },
    }
@ -924,6 +936,10 @@ class Client(client_base.Client):
            'volume-space-attributes') or netapp_api.NaElement('none')
        volume_qos_attributes = volume_attributes.get_child_by_name(
            'volume-qos-attributes') or netapp_api.NaElement('none')
        volume_snapshot_attributes = volume_attributes.get_child_by_name(
            'volume-snapshot-attributes') or netapp_api.NaElement('none')
        volume_language_attributes = volume_attributes.get_child_by_name(
            'volume-language-attributes') or netapp_api.NaElement('none')

        volume = {
            'name': volume_id_attributes.get_child_content('name'),
@ -933,13 +949,22 @@ class Client(client_base.Client):
                'junction-path'),
            'aggregate': volume_id_attributes.get_child_content(
                'containing-aggregate-name'),
            'type': volume_id_attributes.get_child_content('type'),
            'space-guarantee-enabled': strutils.bool_from_string(
                volume_space_attributes.get_child_content(
                    'is-space-guarantee-enabled')),
            'space-guarantee': volume_space_attributes.get_child_content(
                'space-guarantee'),
            'percentage-snapshot-reserve': (
                volume_space_attributes.get_child_content(
                    'percentage-snapshot-reserve')),
            'size': volume_space_attributes.get_child_content('size'),
            'qos-policy-group': volume_qos_attributes.get_child_content(
                'policy-group-name')
                'policy-group-name'),
            'snapshot-policy': volume_snapshot_attributes.get_child_content(
                'snapshot-policy'),
            'language': volume_language_attributes.get_child_content(
                'language-code'),
        }

        return volume
@ -1015,6 +1040,106 @@ class Client(client_base.Client):

        return True

    def create_flexvol(self, flexvol_name, aggregate_name, size_gb,
                       space_guarantee_type=None, snapshot_policy=None,
                       language=None, dedupe_enabled=False,
                       compression_enabled=False, snapshot_reserve=None,
                       volume_type='rw'):
        """Creates a volume."""
        api_args = {
            'containing-aggr-name': aggregate_name,
            'size': six.text_type(size_gb) + 'g',
            'volume': flexvol_name,
            'volume-type': volume_type,
        }
        if volume_type == 'dp':
            snapshot_policy = None
        else:
            api_args['junction-path'] = '/%s' % flexvol_name
        if snapshot_policy is not None:
            api_args['snapshot-policy'] = snapshot_policy
        if space_guarantee_type:
            api_args['space-reserve'] = space_guarantee_type
        if language is not None:
            api_args['language-code'] = language
        if snapshot_reserve is not None:
            api_args['percentage-snapshot-reserve'] = six.text_type(
                snapshot_reserve)
        self.send_request('volume-create', api_args)

        # cDOT compression requires that deduplication be enabled.
        if dedupe_enabled or compression_enabled:
            self.enable_flexvol_dedupe(flexvol_name)
        if compression_enabled:
            self.enable_flexvol_compression(flexvol_name)
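
Note the volume_type switch above: a 'dp' (data protection) flexvol is created unmounted and without a snapshot policy, since it will serve as a SnapMirror target, while an 'rw' flexvol gets a junction path. A hedged usage sketch with fake names, assuming a configured Client instance:

# Create a 10 GB SnapMirror target on the secondary cluster.
client.create_flexvol('mirror_vol', 'aggr01', 10, volume_type='dp')

# Create a regular read-write flexvol with extra provisioning options.
client.create_flexvol('data_vol', 'aggr01', 10,
                      space_guarantee_type='none',
                      snapshot_policy='default',
                      snapshot_reserve=5,
                      dedupe_enabled=True,
                      compression_enabled=True)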

    def flexvol_exists(self, volume_name):
        """Checks if a flexvol exists on the storage array."""
        LOG.debug('Checking if volume %s exists', volume_name)

        api_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': volume_name,
                    },
                },
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': None,
                    },
                },
            },
        }
        result = self.send_iter_request('volume-get-iter', api_args)
        return self._has_records(result)

    def rename_flexvol(self, orig_flexvol_name, new_flexvol_name):
        """Set flexvol name."""
        api_args = {
            'volume': orig_flexvol_name,
            'new-volume-name': new_flexvol_name,
        }
        self.send_request('volume-rename', api_args)

    def mount_flexvol(self, flexvol_name, junction_path=None):
        """Mounts a volume on a junction path."""
        api_args = {
            'volume-name': flexvol_name,
            'junction-path': (junction_path if junction_path
                              else '/%s' % flexvol_name)
        }
        self.send_request('volume-mount', api_args)

    def enable_flexvol_dedupe(self, flexvol_name):
        """Enable deduplication on volume."""
        api_args = {'path': '/vol/%s' % flexvol_name}
        self.send_request('sis-enable', api_args)

    def disable_flexvol_dedupe(self, flexvol_name):
        """Disable deduplication on volume."""
        api_args = {'path': '/vol/%s' % flexvol_name}
        self.send_request('sis-disable', api_args)

    def enable_flexvol_compression(self, flexvol_name):
        """Enable compression on volume."""
        api_args = {
            'path': '/vol/%s' % flexvol_name,
            'enable-compression': 'true'
        }
        self.send_request('sis-set-config', api_args)

    def disable_flexvol_compression(self, flexvol_name):
        """Disable compression on volume."""
        api_args = {
            'path': '/vol/%s' % flexvol_name,
            'enable-compression': 'false'
        }
        self.send_request('sis-set-config', api_args)

    @utils.trace_method
    def delete_file(self, path_to_file):
        """Delete file at path."""
@ -1400,3 +1525,492 @@ class Client(client_base.Client):
                    'volume %(vol)s.')
            msg_args = {'snap': snapshot_name, 'vol': volume_name}
            raise exception.VolumeBackendAPIException(data=msg % msg_args)

    def create_cluster_peer(self, addresses, username=None, password=None,
                            passphrase=None):
        """Creates a cluster peer relationship."""

        api_args = {
            'peer-addresses': [
                {'remote-inet-address': address} for address in addresses
            ],
        }
        if username:
            api_args['user-name'] = username
        if password:
            api_args['password'] = password
        if passphrase:
            api_args['passphrase'] = passphrase

        self.send_request('cluster-peer-create', api_args)

    def get_cluster_peers(self, remote_cluster_name=None):
        """Gets one or more cluster peer relationships."""

        api_args = {}
        if remote_cluster_name:
            api_args['query'] = {
                'cluster-peer-info': {
                    'remote-cluster-name': remote_cluster_name,
                }
            }

        result = self.send_iter_request('cluster-peer-get-iter', api_args)
        if not self._has_records(result):
            return []

        cluster_peers = []

        for cluster_peer_info in result.get_child_by_name(
                'attributes-list').get_children():

            cluster_peer = {
                'active-addresses': [],
                'peer-addresses': []
            }

            active_addresses = cluster_peer_info.get_child_by_name(
                'active-addresses') or netapp_api.NaElement('none')
            for address in active_addresses.get_children():
                cluster_peer['active-addresses'].append(address.get_content())

            peer_addresses = cluster_peer_info.get_child_by_name(
                'peer-addresses') or netapp_api.NaElement('none')
            for address in peer_addresses.get_children():
                cluster_peer['peer-addresses'].append(address.get_content())

            cluster_peer['availability'] = cluster_peer_info.get_child_content(
                'availability')
            cluster_peer['cluster-name'] = cluster_peer_info.get_child_content(
                'cluster-name')
            cluster_peer['cluster-uuid'] = cluster_peer_info.get_child_content(
                'cluster-uuid')
            cluster_peer['remote-cluster-name'] = (
                cluster_peer_info.get_child_content('remote-cluster-name'))
            cluster_peer['serial-number'] = (
                cluster_peer_info.get_child_content('serial-number'))
            cluster_peer['timeout'] = cluster_peer_info.get_child_content(
                'timeout')

            cluster_peers.append(cluster_peer)

        return cluster_peers
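
Cluster peering and Vserver peering are the prerequisites for SnapMirror between clusters: peer the clusters once, then peer the owning Vservers for the 'snapmirror' application. An end-to-end sketch using the calls defined here (addresses and names are fake; the accept step is assumed to run against the peer cluster's client):

client.create_cluster_peer(['10.0.0.10'], passphrase='fake_passphrase')
if not client.get_cluster_peers(remote_cluster_name='cluster2'):
    raise RuntimeError('peering did not take effect')  # illustrative check

client.create_vserver_peer('vserver1', 'vserver2')
# The pending relationship must be accepted on the peer cluster.
peer_client.accept_vserver_peer('vserver2', 'vserver1')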

    def delete_cluster_peer(self, cluster_name):
        """Deletes a cluster peer relationship."""

        api_args = {'cluster-name': cluster_name}
        self.send_request('cluster-peer-delete', api_args)

    def get_cluster_peer_policy(self):
        """Gets the cluster peering policy configuration."""

        if not self.features.CLUSTER_PEER_POLICY:
            return {}

        result = self.send_request('cluster-peer-policy-get')

        attributes = result.get_child_by_name(
            'attributes') or netapp_api.NaElement('none')
        cluster_peer_policy = attributes.get_child_by_name(
            'cluster-peer-policy') or netapp_api.NaElement('none')

        policy = {
            'is-unauthenticated-access-permitted':
            cluster_peer_policy.get_child_content(
                'is-unauthenticated-access-permitted'),
            'passphrase-minimum-length':
            cluster_peer_policy.get_child_content(
                'passphrase-minimum-length'),
        }

        if policy['is-unauthenticated-access-permitted'] is not None:
            policy['is-unauthenticated-access-permitted'] = (
                strutils.bool_from_string(
                    policy['is-unauthenticated-access-permitted']))
        if policy['passphrase-minimum-length'] is not None:
            policy['passphrase-minimum-length'] = int(
                policy['passphrase-minimum-length'])

        return policy

    def set_cluster_peer_policy(self, is_unauthenticated_access_permitted=None,
                                passphrase_minimum_length=None):
        """Modifies the cluster peering policy configuration."""

        if not self.features.CLUSTER_PEER_POLICY:
            return

        if (is_unauthenticated_access_permitted is None and
                passphrase_minimum_length is None):
            return

        api_args = {}
        if is_unauthenticated_access_permitted is not None:
            api_args['is-unauthenticated-access-permitted'] = (
                'true' if strutils.bool_from_string(
                    is_unauthenticated_access_permitted) else 'false')
        if passphrase_minimum_length is not None:
            api_args['passphrase-minlength'] = six.text_type(
                passphrase_minimum_length)

        self.send_request('cluster-peer-policy-modify', api_args)

    def create_vserver_peer(self, vserver_name, peer_vserver_name):
        """Creates a Vserver peer relationship for SnapMirrors."""
        api_args = {
            'vserver': vserver_name,
            'peer-vserver': peer_vserver_name,
            'applications': [
                {'vserver-peer-application': 'snapmirror'},
            ],
        }
        self.send_request('vserver-peer-create', api_args)

    def delete_vserver_peer(self, vserver_name, peer_vserver_name):
        """Deletes a Vserver peer relationship."""

        api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name}
        self.send_request('vserver-peer-delete', api_args)

    def accept_vserver_peer(self, vserver_name, peer_vserver_name):
        """Accepts a pending Vserver peer relationship."""

        api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name}
        self.send_request('vserver-peer-accept', api_args)

    def get_vserver_peers(self, vserver_name=None, peer_vserver_name=None):
        """Gets one or more Vserver peer relationships."""

        api_args = None
        if vserver_name or peer_vserver_name:
            api_args = {'query': {'vserver-peer-info': {}}}
            if vserver_name:
                api_args['query']['vserver-peer-info']['vserver'] = (
                    vserver_name)
            if peer_vserver_name:
                api_args['query']['vserver-peer-info']['peer-vserver'] = (
                    peer_vserver_name)

        result = self.send_iter_request('vserver-peer-get-iter', api_args)
        if not self._has_records(result):
            return []

        vserver_peers = []

        for vserver_peer_info in result.get_child_by_name(
                'attributes-list').get_children():

            vserver_peer = {
                'vserver': vserver_peer_info.get_child_content('vserver'),
                'peer-vserver':
                    vserver_peer_info.get_child_content('peer-vserver'),
                'peer-state':
                    vserver_peer_info.get_child_content('peer-state'),
                'peer-cluster':
                    vserver_peer_info.get_child_content('peer-cluster'),
            }
            vserver_peers.append(vserver_peer)

        return vserver_peers

    def _ensure_snapmirror_v2(self):
        """Verify support for SnapMirror control plane v2."""
        if not self.features.SNAPMIRROR_V2:
            msg = _('SnapMirror features require Data ONTAP 8.2 or later.')
            raise exception.NetAppDriverException(msg)

    def create_snapmirror(self, source_vserver, source_volume,
                          destination_vserver, destination_volume,
                          schedule=None, policy=None,
                          relationship_type='data_protection'):
        """Creates a SnapMirror relationship (cDOT 8.2 or later only)."""
        self._ensure_snapmirror_v2()

        api_args = {
            'source-volume': source_volume,
            'source-vserver': source_vserver,
            'destination-volume': destination_volume,
            'destination-vserver': destination_vserver,
            'relationship-type': relationship_type,
        }
        if schedule:
            api_args['schedule'] = schedule
        if policy:
            api_args['policy'] = policy

        try:
            self.send_request('snapmirror-create', api_args)
        except netapp_api.NaApiError as e:
            if e.code != netapp_api.ERELATION_EXISTS:
                raise
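
create_snapmirror is deliberately idempotent (ERELATION_EXISTS is swallowed), so callers can safely re-drive the create-then-initialize sequence. A sketch with fake endpoints:

# Safe to repeat: an existing relationship is not treated as a failure.
client.create_snapmirror('vserver1', 'vol1', 'vserver2', 'vol1_mirror',
                         schedule='hourly')
job_info = client.initialize_snapmirror('vserver1', 'vol1',
                                        'vserver2', 'vol1_mirror')
# initialize_snapmirror returns the async bookkeeping parsed below, e.g.
# {'operation-id': ..., 'status': ..., 'jobid': ...,
#  'error-code': ..., 'error-message': ...}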
|
||||
|
||||
def initialize_snapmirror(self, source_vserver, source_volume,
|
||||
destination_vserver, destination_volume,
|
||||
source_snapshot=None, transfer_priority=None):
|
||||
"""Initializes a SnapMirror relationship (cDOT 8.2 or later only)."""
|
||||
self._ensure_snapmirror_v2()
|
||||
|
||||
api_args = {
|
||||
'source-volume': source_volume,
|
||||
'source-vserver': source_vserver,
|
||||
'destination-volume': destination_volume,
|
||||
'destination-vserver': destination_vserver,
|
||||
}
|
||||
if source_snapshot:
|
||||
api_args['source-snapshot'] = source_snapshot
|
||||
if transfer_priority:
|
||||
api_args['transfer-priority'] = transfer_priority
|
||||
|
||||
result = self.send_request('snapmirror-initialize', api_args)
|
||||
|
||||
result_info = {}
|
||||
result_info['operation-id'] = result.get_child_content(
|
||||
'result-operation-id')
|
||||
result_info['status'] = result.get_child_content('result-status')
|
||||
result_info['jobid'] = result.get_child_content('result-jobid')
|
||||
result_info['error-code'] = result.get_child_content(
|
||||
'result-error-code')
|
||||
result_info['error-message'] = result.get_child_content(
|
||||
'result-error-message')
|
||||
|
||||
return result_info
|
||||
|
||||
def release_snapmirror(self, source_vserver, source_volume,
|
||||
destination_vserver, destination_volume,
|
||||
relationship_info_only=False):
|
||||
"""Removes a SnapMirror relationship on the source endpoint."""
|
||||
self._ensure_snapmirror_v2()
|
||||
|
||||
api_args = {
|
||||
'query': {
|
||||
'snapmirror-destination-info': {
|
||||
'source-volume': source_volume,
|
||||
'source-vserver': source_vserver,
|
||||
'destination-volume': destination_volume,
|
||||
'destination-vserver': destination_vserver,
|
||||
'relationship-info-only': ('true' if relationship_info_only
|
||||
else 'false'),
|
||||
}
|
||||
}
|
||||
}
|
||||
self.send_request('snapmirror-release-iter', api_args)
|
||||
|
||||
def quiesce_snapmirror(self, source_vserver, source_volume,
|
||||
destination_vserver, destination_volume):
|
||||
"""Disables future transfers to a SnapMirror destination."""
|
||||
self._ensure_snapmirror_v2()
|
||||
|
||||
api_args = {
|
||||
'source-volume': source_volume,
|
||||
'source-vserver': source_vserver,
|
||||
'destination-volume': destination_volume,
|
||||
'destination-vserver': destination_vserver,
|
||||
}
|
||||
self.send_request('snapmirror-quiesce', api_args)
|
||||
|
||||
def abort_snapmirror(self, source_vserver, source_volume,
|
||||
destination_vserver, destination_volume,
|
||||
clear_checkpoint=False):
|
||||
"""Stops ongoing transfers for a SnapMirror relationship."""
|
||||
self._ensure_snapmirror_v2()
|
||||
|
||||
api_args = {
|
||||
'source-volume': source_volume,
|
||||
'source-vserver': source_vserver,
|
||||
'destination-volume': destination_volume,
|
||||
'destination-vserver': destination_vserver,
|
||||
'clear-checkpoint': 'true' if clear_checkpoint else 'false',
|
||||
}
|
||||
try:
|
||||
self.send_request('snapmirror-abort', api_args)
|
||||
except netapp_api.NaApiError as e:
|
||||
            if e.code != netapp_api.ENOTRANSFER_IN_PROGRESS:
                raise

    def break_snapmirror(self, source_vserver, source_volume,
                         destination_vserver, destination_volume):
        """Breaks a data protection SnapMirror relationship."""
        self._ensure_snapmirror_v2()

        api_args = {
            'source-volume': source_volume,
            'source-vserver': source_vserver,
            'destination-volume': destination_volume,
            'destination-vserver': destination_vserver,
        }
        self.send_request('snapmirror-break', api_args)

    def modify_snapmirror(self, source_vserver, source_volume,
                          destination_vserver, destination_volume,
                          schedule=None, policy=None, tries=None,
                          max_transfer_rate=None):
        """Modifies a SnapMirror relationship."""
        self._ensure_snapmirror_v2()

        api_args = {
            'source-volume': source_volume,
            'source-vserver': source_vserver,
            'destination-volume': destination_volume,
            'destination-vserver': destination_vserver,
        }
        if schedule:
            api_args['schedule'] = schedule
        if policy:
            api_args['policy'] = policy
        if tries is not None:
            api_args['tries'] = tries
        if max_transfer_rate is not None:
            api_args['max-transfer-rate'] = max_transfer_rate

        self.send_request('snapmirror-modify', api_args)

    def delete_snapmirror(self, source_vserver, source_volume,
                          destination_vserver, destination_volume):
        """Destroys a SnapMirror relationship."""
        self._ensure_snapmirror_v2()

        api_args = {
            'query': {
                'snapmirror-info': {
                    'source-volume': source_volume,
                    'source-vserver': source_vserver,
                    'destination-volume': destination_volume,
                    'destination-vserver': destination_vserver,
                }
            }
        }
        self.send_request('snapmirror-destroy-iter', api_args)

    def update_snapmirror(self, source_vserver, source_volume,
                          destination_vserver, destination_volume):
        """Schedules a SnapMirror update."""
        self._ensure_snapmirror_v2()

        api_args = {
            'source-volume': source_volume,
            'source-vserver': source_vserver,
            'destination-volume': destination_volume,
            'destination-vserver': destination_vserver,
        }
        try:
            self.send_request('snapmirror-update', api_args)
        except netapp_api.NaApiError as e:
            if (e.code != netapp_api.ETRANSFER_IN_PROGRESS and
                    e.code != netapp_api.EANOTHER_OP_ACTIVE):
                raise

    def resume_snapmirror(self, source_vserver, source_volume,
                          destination_vserver, destination_volume):
        """Resume a SnapMirror relationship if it is quiesced."""
        self._ensure_snapmirror_v2()

        api_args = {
            'source-volume': source_volume,
            'source-vserver': source_vserver,
            'destination-volume': destination_volume,
            'destination-vserver': destination_vserver,
        }
        try:
            self.send_request('snapmirror-resume', api_args)
        except netapp_api.NaApiError as e:
            if e.code != netapp_api.ERELATION_NOT_QUIESCED:
                raise

    def resync_snapmirror(self, source_vserver, source_volume,
                          destination_vserver, destination_volume):
        """Resync a SnapMirror relationship."""
        self._ensure_snapmirror_v2()

        api_args = {
            'source-volume': source_volume,
            'source-vserver': source_vserver,
            'destination-volume': destination_volume,
            'destination-vserver': destination_vserver,
        }
        self.send_request('snapmirror-resync', api_args)

    def _get_snapmirrors(self, source_vserver=None, source_volume=None,
                         destination_vserver=None, destination_volume=None,
                         desired_attributes=None):

        query = None
        if (source_vserver or source_volume or destination_vserver or
                destination_volume):
            query = {'snapmirror-info': {}}
            if source_volume:
                query['snapmirror-info']['source-volume'] = source_volume
            if destination_volume:
                query['snapmirror-info']['destination-volume'] = (
                    destination_volume)
            if source_vserver:
                query['snapmirror-info']['source-vserver'] = source_vserver
            if destination_vserver:
                query['snapmirror-info']['destination-vserver'] = (
                    destination_vserver)

        api_args = {}
        if query:
            api_args['query'] = query
        if desired_attributes:
            api_args['desired-attributes'] = desired_attributes

        result = self.send_iter_request('snapmirror-get-iter', api_args)
        if not self._has_records(result):
            return []
        else:
            return result.get_child_by_name('attributes-list').get_children()

    def get_snapmirrors(self, source_vserver, source_volume,
                        destination_vserver, destination_volume,
                        desired_attributes=None):
        """Gets one or more SnapMirror relationships.

        Either the source or destination info may be omitted.
        Desired attributes should be a flat list of attribute names.
        """
        self._ensure_snapmirror_v2()

        if desired_attributes is not None:
            desired_attributes = {
                'snapmirror-info': {attr: None for attr in desired_attributes},
            }

        result = self._get_snapmirrors(
            source_vserver=source_vserver,
            source_volume=source_volume,
            destination_vserver=destination_vserver,
            destination_volume=destination_volume,
            desired_attributes=desired_attributes)

        snapmirrors = []

        for snapmirror_info in result:
            snapmirror = {}
            for child in snapmirror_info.get_children():
                name = self._strip_xml_namespace(child.get_name())
                snapmirror[name] = child.get_content()
            snapmirrors.append(snapmirror)

        return snapmirrors
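
Each relationship comes back as a flat dict keyed by the ZAPI element names. A minimal sketch of what a caller sees, assuming a single hourly mirror (the values below are hypothetical):

    # Hypothetical sketch; keys follow the desired_attributes requested.
    mirrors = client.get_snapmirrors(
        'vserver_a', 'volume1', 'vserver_b', 'volume1',
        desired_attributes=['relationship-status', 'mirror-state', 'lag-time'])
    # mirrors == [{'relationship-status': 'idle',
    #              'mirror-state': 'snapmirrored',
    #              'lag-time': '3600'}]  # element content arrives as strings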

    def get_provisioning_options_from_flexvol(self, flexvol_name):
        """Get a dict of provisioning options matching existing flexvol."""

        flexvol_info = self.get_flexvol(flexvol_name=flexvol_name)
        dedupe_info = self.get_flexvol_dedupe_info(flexvol_name)

        provisioning_opts = {
            'aggregate': flexvol_info['aggregate'],
            # space-guarantee can be 'none', 'file', 'volume'
            'space_guarantee_type': flexvol_info.get('space-guarantee'),
            'snapshot_policy': flexvol_info['snapshot-policy'],
            'language': flexvol_info['language'],
            'dedupe_enabled': dedupe_info['dedupe'],
            'compression_enabled': dedupe_info['compression'],
            'snapshot_reserve': flexvol_info['percentage-snapshot-reserve'],
            'volume_type': flexvol_info['type'],
            'size': int(math.ceil(float(flexvol_info['size']) / units.Gi)),
        }

        return provisioning_opts
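
This is what lets the data motion code clone a pool's layout onto a replication target. A hedged example of the returned dict, with hypothetical values ('size' is reported in whole GiB, rounded up from bytes):

    opts = src_client.get_provisioning_options_from_flexvol('volume1')
    # e.g. {'aggregate': 'aggr1', 'space_guarantee_type': 'none',
    #       'snapshot_policy': 'default', 'language': 'en_US',
    #       'dedupe_enabled': True, 'compression_enabled': False,
    #       'snapshot_reserve': '5', 'volume_type': 'rw', 'size': 1}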

@@ -129,3 +129,6 @@ class NetApp7modeFibreChannelDriver(driver.BaseVD,
        return self.library.create_consistencygroup_from_src(
            group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
            source_cg=source_cg, source_vols=source_vols)

    def failover_host(self, context, volumes, secondary_id=None):
        raise NotImplementedError()

@@ -129,3 +129,7 @@ class NetAppCmodeFibreChannelDriver(driver.BaseVD,
        return self.library.create_consistencygroup_from_src(
            group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
            source_cg=source_cg, source_vols=source_vols)

    def failover_host(self, context, volumes, secondary_id=None):
        return self.library.failover_host(
            context, volumes, secondary_id=secondary_id)

@@ -126,3 +126,6 @@ class NetApp7modeISCSIDriver(driver.BaseVD,
        return self.library.create_consistencygroup_from_src(
            group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
            source_cg=source_cg, source_vols=source_vols)

    def failover_host(self, context, volumes, secondary_id=None):
        raise NotImplementedError()

@@ -126,3 +126,7 @@ class NetAppCmodeISCSIDriver(driver.BaseVD,
        return self.library.create_consistencygroup_from_src(
            group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
            source_cg=source_cg, source_vols=source_vols)

    def failover_host(self, context, volumes, secondary_id=None):
        return self.library.failover_host(
            context, volumes, secondary_id=secondary_id)
@@ -32,6 +32,7 @@ import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import six
from six.moves import urllib

@@ -49,6 +50,7 @@ from cinder.volume import utils as volume_utils

LOG = logging.getLogger(__name__)
CONF = cfg.CONF
HOUSEKEEPING_INTERVAL_SECONDS = 600  # ten minutes


@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)

@@ -76,6 +78,7 @@ class NetAppNfsDriver(driver.ManageableVD,
        self.configuration.append_config_values(na_opts.netapp_transport_opts)
        self.configuration.append_config_values(na_opts.netapp_img_cache_opts)
        self.configuration.append_config_values(na_opts.netapp_nfs_extra_opts)
        self.backend_name = self.host.split('@')[1]

    def do_setup(self, context):
        super(NetAppNfsDriver, self).do_setup(context)

@@ -86,6 +89,20 @@
    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        super(NetAppNfsDriver, self).check_for_setup_error()
        self._start_periodic_tasks()

    def _start_periodic_tasks(self):
        """Start recurring tasks common to all Data ONTAP NFS drivers."""

        # Start the task that runs other housekeeping tasks, such as deletion
        # of previously soft-deleted storage artifacts.
        housekeeping_periodic_task = loopingcall.FixedIntervalLoopingCall(
            self._handle_housekeeping_tasks)
        housekeeping_periodic_task.start(
            interval=HOUSEKEEPING_INTERVAL_SECONDS, initial_delay=0)

    def _handle_housekeeping_tasks(self):
        """Handle various cleanup activities."""

    def get_pool(self, volume):
        """Return pool name where volume resides.
@@ -34,23 +34,24 @@ from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import interface
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode
from cinder.volume.drivers.netapp.dataontap.utils import capabilities
from cinder.volume.drivers.netapp.dataontap.utils import data_motion
from cinder.volume.drivers.netapp.dataontap.utils import utils as cmode_utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils


LOG = logging.getLogger(__name__)
QOS_CLEANUP_INTERVAL_SECONDS = 60
SSC_UPDATE_INTERVAL_SECONDS = 3600  # hourly


@interface.volumedriver
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
-class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
+class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
+                           data_motion.DataMotionMixin):
    """NetApp NFS driver for Data ONTAP (Cluster-mode)."""

    REQUIRED_CMODE_FLAGS = ['netapp_vserver']

@@ -58,34 +59,48 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
    def __init__(self, *args, **kwargs):
        super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(na_opts.netapp_cluster_opts)
        self.failed_over_backend_name = kwargs.get('active_backend_id')
        self.failed_over = self.failed_over_backend_name is not None
        self.replication_enabled = (
            True if self.get_replication_backend_names(
                self.configuration) else False)

    def do_setup(self, context):
        """Do the customized set up on client for cluster mode."""
        super(NetAppCmodeNfsDriver, self).do_setup(context)
        na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration)

-       self.vserver = self.configuration.netapp_vserver
-
-       self.zapi_client = client_cmode.Client(
-           transport_type=self.configuration.netapp_transport_type,
-           username=self.configuration.netapp_login,
-           password=self.configuration.netapp_password,
-           hostname=self.configuration.netapp_server_hostname,
-           port=self.configuration.netapp_server_port,
-           vserver=self.vserver)
+       # cDOT API client
+       self.zapi_client = cmode_utils.get_client_for_backend(
+           self.failed_over_backend_name or self.backend_name)
+       self.vserver = self.zapi_client.vserver

        # Performance monitoring library
        self.perf_library = perf_cmode.PerformanceCmodeLibrary(
            self.zapi_client)

        # Storage service catalog
        self.ssc_library = capabilities.CapabilitiesLibrary(
            'nfs', self.vserver, self.zapi_client, self.configuration)

    def _update_zapi_client(self, backend_name):
        """Set cDOT API client for the specified config backend stanza name."""

        self.zapi_client = cmode_utils.get_client_for_backend(backend_name)
        self.vserver = self.zapi_client.vserver
        self.ssc_library._update_for_failover(self.zapi_client,
                                              self._get_flexvol_to_pool_map())
        ssc = self.ssc_library.get_ssc()
        self.perf_library._update_for_failover(self.zapi_client, ssc)

    @utils.trace_method
    def check_for_setup_error(self):
        """Check that the driver is working and can communicate."""
        super(NetAppCmodeNfsDriver, self).check_for_setup_error()
        self.ssc_library.check_api_permissions()
        self._start_periodic_tasks()

    def _start_periodic_tasks(self):
        """Start recurring tasks for NetApp cDOT NFS driver."""

        # Note(cknight): Run the task once in the current thread to prevent a
        # race with the first invocation of _update_volume_stats.

@@ -98,12 +113,31 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
            interval=SSC_UPDATE_INTERVAL_SECONDS,
            initial_delay=SSC_UPDATE_INTERVAL_SECONDS)

-       # Start the task that harvests soft-deleted QoS policy groups.
-       harvest_qos_periodic_task = loopingcall.FixedIntervalLoopingCall(
-           self.zapi_client.remove_unused_qos_policy_groups)
-       harvest_qos_periodic_task.start(
-           interval=QOS_CLEANUP_INTERVAL_SECONDS,
-           initial_delay=QOS_CLEANUP_INTERVAL_SECONDS)
+       super(NetAppCmodeNfsDriver, self)._start_periodic_tasks()

    def _handle_housekeeping_tasks(self):
        """Handle various cleanup activities."""
        super(NetAppCmodeNfsDriver, self)._handle_housekeeping_tasks()

        # Harvest soft-deleted QoS policy groups
        self.zapi_client.remove_unused_qos_policy_groups()

        active_backend = self.failed_over_backend_name or self.backend_name

        LOG.debug("Current service state: Replication enabled: %("
                  "replication)s. Failed-Over: %(failed)s. Active Backend "
                  "ID: %(active)s",
                  {
                      'replication': self.replication_enabled,
                      'failed': self.failed_over,
                      'active': active_backend,
                  })

        # Create pool mirrors if whole-backend replication configured
        if self.replication_enabled and not self.failed_over:
            self.ensure_snapmirrors(
                self.configuration, self.backend_name,
                self.ssc_library.get_ssc_flexvol_names())

    def _do_qos_for_volume(self, volume, extra_specs, cleanup=True):
        try:

@@ -166,6 +200,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
            filter_function=self.get_filter_function(),
            goodness_function=self.get_goodness_function())
        data['sparse_copy_volume'] = True
        data.update(self.get_replication_backend_stats(self.configuration))

        self._spawn_clean_cache_job()
        self.zapi_client.provide_ems(self, netapp_backend, self._app_version)

@@ -638,3 +673,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver):
            pass

        super(NetAppCmodeNfsDriver, self).unmanage(volume)

    def failover_host(self, context, volumes, secondary_id=None):
        """Failover a backend to a secondary replication target."""

        return self._failover_host(volumes, secondary_id=secondary_id)
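
For context, this hook is driven by Cinder's replication v2.1 ("cheesecake") API; an operator would typically trigger it with something like `cinder failover-host <host>@<backend> --backend_id <target>`. A rough sketch of the call and its return shape (names and values are hypothetical):

    # Hypothetical: fail the backend over to replication target 'target_1'
    # and collect per-volume model updates for the volume manager.
    active_backend_id, volume_updates = driver.failover_host(
        ctxt, volumes, secondary_id='target_1')
    # active_backend_id == 'target_1'
    # volume_updates[0] == {'volume_id': volumes[0]['id'],
    #                       'updates': {'replication_status': 'failed-over'}}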

@@ -113,6 +113,10 @@ class PerformanceCmodeLibrary(perf_base.PerformanceLibrary):
        return self.pool_utilization.get(pool_name,
                                         perf_base.DEFAULT_UTILIZATION)

    def _update_for_failover(self, zapi_client, ssc_pools):
        self.zapi_client = zapi_client
        self.update_performance_cache(ssc_pools)

    def _get_aggregates_for_pools(self, ssc_pools):
        """Get the set of aggregates that contain the specified pools."""

@@ -88,6 +88,11 @@ class CapabilitiesLibrary(object):

        return copy.deepcopy(self.ssc)

    def get_ssc_flexvol_names(self):
        """Get the names of the FlexVols in the Storage Service Catalog."""
        ssc = self.get_ssc()
        return ssc.keys()

    def get_ssc_for_flexvol(self, flexvol_name):
        """Get map of Storage Service Catalog entries for a single flexvol."""

@@ -133,6 +138,11 @@ class CapabilitiesLibrary(object):

        self.ssc = ssc

    def _update_for_failover(self, zapi_client, flexvol_map):

        self.zapi_client = zapi_client
        self.update_ssc(flexvol_map)

    def _get_ssc_flexvol_info(self, flexvol_name):
        """Gather flexvol info and recast into SSC-style volume stats."""

cinder/volume/drivers/netapp/dataontap/utils/data_motion.py (new file, 640 lines)
@@ -0,0 +1,640 @@
# Copyright (c) 2016 Alex Meade. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NetApp Data ONTAP data motion library.

This library handles transferring data from a source to a destination. Its
responsibility is to handle this as efficiently as possible given the
location of the data's source and destination. This includes cloning,
SnapMirror, and copy-offload as improvements to brute force data transfer.
"""

from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils

from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE, _LI
from cinder.objects import fields
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.utils import utils as config_utils
from cinder.volume import utils as volume_utils

LOG = log.getLogger(__name__)
CONF = cfg.CONF
ENTRY_DOES_NOT_EXIST = "(entry doesn't exist)"
QUIESCE_RETRY_INTERVAL = 5


class DataMotionMixin(object):

    def get_replication_backend_names(self, config):
        """Get the backend names for all configured replication targets."""

        backend_names = []

        replication_devices = config.safe_get('replication_device')
        if replication_devices:
            for replication_device in replication_devices:
                backend_id = replication_device.get('backend_id')
                if backend_id:
                    backend_names.append(backend_id)

        return backend_names

    def get_replication_backend_stats(self, config):
        """Get the driver replication info for merging into volume stats."""

        backend_names = self.get_replication_backend_names(config)

        if len(backend_names) > 0:
            stats = {
                'replication_enabled': True,
                'replication_count': len(backend_names),
                'replication_targets': backend_names,
                'replication_type': 'async',
            }
        else:
            stats = {'replication_enabled': False}

        return stats
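
A quick sketch of the stats contribution merged into the driver's volume stats, assuming two replication_device entries with hypothetical backend_ids:

    stats = self.get_replication_backend_stats(config)
    # {'replication_enabled': True,
    #  'replication_count': 2,
    #  'replication_targets': ['target_1', 'target_2'],
    #  'replication_type': 'async'}
    # With no replication_device entries: {'replication_enabled': False}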

    def _get_replication_aggregate_map(self, src_backend_name,
                                       target_backend_name):
        """Get the aggregate mapping config between src and destination."""

        aggregate_map = {}

        config = config_utils.get_backend_configuration(src_backend_name)

        all_replication_aggregate_maps = config.safe_get(
            'netapp_replication_aggregate_map')
        if all_replication_aggregate_maps:
            for replication_aggregate_map in all_replication_aggregate_maps:
                if (replication_aggregate_map.get('backend_id') ==
                        target_backend_name):
                    replication_aggregate_map.pop('backend_id')
                    aggregate_map = replication_aggregate_map
                    break

        return aggregate_map
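
For illustration, assume a hypothetical option value of netapp_replication_aggregate_map = backend_id:target_1,aggr1:aggr10,aggr2:aggr20. oslo.config's MultiOpt/Dict machinery yields one dict per occurrence, and the lookup above reduces it to a plain source-to-destination aggregate map:

    # Parsed entry: {'backend_id': 'target_1',
    #                'aggr1': 'aggr10', 'aggr2': 'aggr20'}
    # After popping 'backend_id':
    # aggregate_map == {'aggr1': 'aggr10', 'aggr2': 'aggr20'}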

    def get_snapmirrors(self, src_backend_name, dest_backend_name,
                        src_flexvol_name=None, dest_flexvol_name=None):
        """Get info regarding SnapMirror relationship/s for given params."""
        dest_backend_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_backend_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)

        src_backend_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = src_backend_config.netapp_vserver

        snapmirrors = dest_client.get_snapmirrors(
            src_vserver, src_flexvol_name,
            dest_vserver, dest_flexvol_name,
            desired_attributes=[
                'relationship-status',
                'mirror-state',
                'source-vserver',
                'source-volume',
                'destination-vserver',
                'destination-volume',
                'last-transfer-end-timestamp',
                'lag-time',
            ])
        return snapmirrors

    def create_snapmirror(self, src_backend_name, dest_backend_name,
                          src_flexvol_name, dest_flexvol_name):
        """Set up a SnapMirror relationship b/w two FlexVols (cinder pools)

        1. Create SnapMirror relationship
        2. Initialize data transfer asynchronously

        If a SnapMirror relationship already exists and is broken off or
        quiesced, resume and re-sync the mirror.
        """
        dest_backend_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_backend_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)

        source_backend_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = source_backend_config.netapp_vserver

        # 1. Create destination 'dp' FlexVol if it doesn't exist
        if not dest_client.flexvol_exists(dest_flexvol_name):
            self.create_destination_flexvol(src_backend_name,
                                            dest_backend_name,
                                            src_flexvol_name,
                                            dest_flexvol_name)

        # 2. Check if SnapMirror relationship exists
        existing_mirrors = dest_client.get_snapmirrors(
            src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name)

        msg_payload = {
            'src_vserver': src_vserver,
            'src_volume': src_flexvol_name,
            'dest_vserver': dest_vserver,
            'dest_volume': dest_flexvol_name,
        }

        # 3. Create and initialize SnapMirror if it doesn't already exist
        if not existing_mirrors:
            # TODO(gouthamr): Change the schedule from hourly to a config value
            msg = ("Creating a SnapMirror relationship between "
                   "%(src_vserver)s:%(src_volume)s and %(dest_vserver)s:"
                   "%(dest_volume)s.")
            LOG.debug(msg, msg_payload)

            dest_client.create_snapmirror(src_vserver,
                                          src_flexvol_name,
                                          dest_vserver,
                                          dest_flexvol_name,
                                          schedule='hourly')

            msg = ("Initializing SnapMirror transfers between "
                   "%(src_vserver)s:%(src_volume)s and %(dest_vserver)s:"
                   "%(dest_volume)s.")
            LOG.debug(msg, msg_payload)

            # Initialize async transfer of the initial data
            dest_client.initialize_snapmirror(src_vserver,
                                              src_flexvol_name,
                                              dest_vserver,
                                              dest_flexvol_name)

        # 4. Try to repair SnapMirror if existing
        else:
            snapmirror = existing_mirrors[0]
            if snapmirror.get('mirror-state') != 'snapmirrored':
                try:
                    msg = ("SnapMirror between %(src_vserver)s:%(src_volume)s "
                           "and %(dest_vserver)s:%(dest_volume)s is in "
                           "'%(state)s' state. Attempting to repair it.")
                    msg_payload['state'] = snapmirror.get('mirror-state')
                    LOG.debug(msg, msg_payload)
                    dest_client.resume_snapmirror(src_vserver,
                                                  src_flexvol_name,
                                                  dest_vserver,
                                                  dest_flexvol_name)
                    dest_client.resync_snapmirror(src_vserver,
                                                  src_flexvol_name,
                                                  dest_vserver,
                                                  dest_flexvol_name)
                except netapp_api.NaApiError:
                    LOG.exception(_LE("Could not re-sync SnapMirror."))

    def delete_snapmirror(self, src_backend_name, dest_backend_name,
                          src_flexvol_name, dest_flexvol_name, release=True):
        """Ensure all information about a SnapMirror relationship is removed.

        1. Abort SnapMirror
        2. Delete the SnapMirror
        3. Release SnapMirror to cleanup SnapMirror metadata and snapshots
        """
        dest_backend_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_backend_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)

        source_backend_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = source_backend_config.netapp_vserver

        # 1. Abort any ongoing transfers
        try:
            dest_client.abort_snapmirror(src_vserver,
                                         src_flexvol_name,
                                         dest_vserver,
                                         dest_flexvol_name,
                                         clear_checkpoint=False)
        except netapp_api.NaApiError:
            # SnapMirror is already deleted
            pass

        # 2. Delete SnapMirror Relationship and cleanup destination snapshots
        try:
            dest_client.delete_snapmirror(src_vserver,
                                          src_flexvol_name,
                                          dest_vserver,
                                          dest_flexvol_name)
        except netapp_api.NaApiError as e:
            with excutils.save_and_reraise_exception() as exc_context:
                if (e.code == netapp_api.EOBJECTNOTFOUND or
                        e.code == netapp_api.ESOURCE_IS_DIFFERENT or
                        ENTRY_DOES_NOT_EXIST in e.message):
                    LOG.info(_LI('No SnapMirror relationship to delete.'))
                    exc_context.reraise = False

        if release:
            # If the source is unreachable, do not perform the release
            try:
                src_client = config_utils.get_client_for_backend(
                    src_backend_name, vserver_name=src_vserver)
            except Exception:
                src_client = None
            # 3. Cleanup SnapMirror relationship on source
            try:
                if src_client:
                    src_client.release_snapmirror(src_vserver,
                                                  src_flexvol_name,
                                                  dest_vserver,
                                                  dest_flexvol_name)
            except netapp_api.NaApiError as e:
                with excutils.save_and_reraise_exception() as exc_context:
                    if (e.code == netapp_api.EOBJECTNOTFOUND or
                            e.code == netapp_api.ESOURCE_IS_DIFFERENT or
                            ENTRY_DOES_NOT_EXIST in e.message):
                        # Handle the case where the SnapMirror is already
                        # cleaned up
                        exc_context.reraise = False

    def update_snapmirror(self, src_backend_name, dest_backend_name,
                          src_flexvol_name, dest_flexvol_name):
        """Schedule a SnapMirror update on the backend."""
        dest_backend_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_backend_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)

        source_backend_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = source_backend_config.netapp_vserver

        # Update SnapMirror
        dest_client.update_snapmirror(src_vserver,
                                      src_flexvol_name,
                                      dest_vserver,
                                      dest_flexvol_name)

    def quiesce_then_abort(self, src_backend_name, dest_backend_name,
                           src_flexvol_name, dest_flexvol_name):
        """Quiesce a SnapMirror and wait with retries before aborting."""
        dest_backend_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_backend_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)

        source_backend_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = source_backend_config.netapp_vserver

        # 1. Attempt to quiesce, then abort
        dest_client.quiesce_snapmirror(src_vserver,
                                       src_flexvol_name,
                                       dest_vserver,
                                       dest_flexvol_name)

        retries = (source_backend_config.netapp_snapmirror_quiesce_timeout /
                   QUIESCE_RETRY_INTERVAL)

        @utils.retry(exception.NetAppDriverException,
                     interval=QUIESCE_RETRY_INTERVAL,
                     retries=retries, backoff_rate=1)
        def wait_for_quiesced():
            snapmirror = dest_client.get_snapmirrors(
                src_vserver, src_flexvol_name, dest_vserver,
                dest_flexvol_name,
                desired_attributes=['relationship-status', 'mirror-state'])[0]
            if snapmirror.get('relationship-status') != 'quiesced':
                msg = _("SnapMirror relationship is not quiesced.")
                raise exception.NetAppDriverException(reason=msg)

        try:
            wait_for_quiesced()
        except exception.NetAppDriverException:
            dest_client.abort_snapmirror(src_vserver,
                                         src_flexvol_name,
                                         dest_vserver,
                                         dest_flexvol_name,
                                         clear_checkpoint=False)

    def break_snapmirror(self, src_backend_name, dest_backend_name,
                         src_flexvol_name, dest_flexvol_name):
        """Break SnapMirror relationship.

        1. Quiesce any ongoing SnapMirror transfers
        2. Wait until SnapMirror finishes transfers and enters quiesced state
        3. Break SnapMirror
        4. Mount the destination volume so it is given a junction path
        """
        dest_backend_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_backend_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)

        source_backend_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = source_backend_config.netapp_vserver

        # 1. Attempt to quiesce, then abort
        self.quiesce_then_abort(src_backend_name, dest_backend_name,
                                src_flexvol_name, dest_flexvol_name)

        # 2. Break SnapMirror
        dest_client.break_snapmirror(src_vserver,
                                     src_flexvol_name,
                                     dest_vserver,
                                     dest_flexvol_name)

        # 3. Mount the destination volume and create a junction path
        dest_client.mount_flexvol(dest_flexvol_name)

    def resync_snapmirror(self, src_backend_name, dest_backend_name,
                          src_flexvol_name, dest_flexvol_name):
        """Re-sync (repair / re-establish) SnapMirror relationship."""
        dest_backend_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_backend_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)

        source_backend_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = source_backend_config.netapp_vserver

        dest_client.resync_snapmirror(src_vserver,
                                      src_flexvol_name,
                                      dest_vserver,
                                      dest_flexvol_name)

    def resume_snapmirror(self, src_backend_name, dest_backend_name,
                          src_flexvol_name, dest_flexvol_name):
        """Resume SnapMirror relationship from a quiesced state."""
        dest_backend_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_backend_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)

        source_backend_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = source_backend_config.netapp_vserver

        dest_client.resume_snapmirror(src_vserver,
                                      src_flexvol_name,
                                      dest_vserver,
                                      dest_flexvol_name)

    def create_destination_flexvol(self, src_backend_name, dest_backend_name,
                                   src_flexvol_name, dest_flexvol_name):
        """Create a SnapMirror mirror target FlexVol for a given source."""
        dest_backend_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_backend_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)

        source_backend_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = source_backend_config.netapp_vserver
        src_client = config_utils.get_client_for_backend(
            src_backend_name, vserver_name=src_vserver)

        provisioning_options = (
            src_client.get_provisioning_options_from_flexvol(
                src_flexvol_name)
        )

        # Remove size and volume_type
        size = provisioning_options.pop('size', None)
        if not size:
            msg = _("Unable to read the size of the source FlexVol (%s) "
                    "to create a SnapMirror destination.")
            raise exception.NetAppDriverException(msg % src_flexvol_name)
        provisioning_options.pop('volume_type', None)

        source_aggregate = provisioning_options.pop('aggregate')
        aggregate_map = self._get_replication_aggregate_map(
            src_backend_name, dest_backend_name)

        if not aggregate_map.get(source_aggregate):
            msg = _("Unable to find configuration matching the source "
                    "aggregate (%s) and the destination aggregate. Option "
                    "netapp_replication_aggregate_map may be incorrect.")
            raise exception.NetAppDriverException(
                message=msg % source_aggregate)

        destination_aggregate = aggregate_map[source_aggregate]

        # NOTE(gouthamr): The volume is intentionally created as a Data
        # Protection volume; junction-path will be added on breaking
        # the mirror.
        dest_client.create_flexvol(dest_flexvol_name,
                                   destination_aggregate,
                                   size,
                                   volume_type='dp',
                                   **provisioning_options)

    def ensure_snapmirrors(self, config, src_backend_name, src_flexvol_names):
        """Ensure all the SnapMirrors needed for whole-backend replication."""
        backend_names = self.get_replication_backend_names(config)
        for dest_backend_name in backend_names:
            for src_flexvol_name in src_flexvol_names:

                dest_flexvol_name = src_flexvol_name

                self.create_snapmirror(src_backend_name,
                                       dest_backend_name,
                                       src_flexvol_name,
                                       dest_flexvol_name)

    def break_snapmirrors(self, config, src_backend_name, src_flexvol_names,
                          chosen_target):
        """Break all existing SnapMirror relationships for a given back end."""
        failed_to_break = []
        backend_names = self.get_replication_backend_names(config)
        for dest_backend_name in backend_names:
            for src_flexvol_name in src_flexvol_names:

                dest_flexvol_name = src_flexvol_name
                try:
                    self.break_snapmirror(src_backend_name,
                                          dest_backend_name,
                                          src_flexvol_name,
                                          dest_flexvol_name)
                except netapp_api.NaApiError:
                    msg = _("Unable to break SnapMirror between FlexVol "
                            "%(src)s and Flexvol %(dest)s. Associated volumes "
                            "will have their replication state set to error.")
                    payload = {
                        'src': ':'.join([src_backend_name, src_flexvol_name]),
                        'dest': ':'.join([dest_backend_name,
                                          dest_flexvol_name]),
                    }
                    if dest_backend_name == chosen_target:
                        failed_to_break.append(src_flexvol_name)
                    LOG.exception(msg, payload)

        return failed_to_break

    def update_snapmirrors(self, config, src_backend_name, src_flexvol_names):
        """Update all existing SnapMirror relationships on a given back end."""
        backend_names = self.get_replication_backend_names(config)
        for dest_backend_name in backend_names:
            for src_flexvol_name in src_flexvol_names:

                dest_flexvol_name = src_flexvol_name
                try:
                    self.update_snapmirror(src_backend_name,
                                           dest_backend_name,
                                           src_flexvol_name,
                                           dest_flexvol_name)
                except netapp_api.NaApiError:
                    # Ignore any errors since the current source may be
                    # unreachable
                    pass

    def _choose_failover_target(self, backend_name, flexvols,
                                replication_targets):
        target_lag_times = []

        for target in replication_targets:
            all_target_mirrors = self.get_snapmirrors(
                backend_name, target, None, None)
            flexvol_mirrors = self._filter_and_sort_mirrors(
                all_target_mirrors, flexvols)

            if not flexvol_mirrors:
                msg = ("Ignoring replication target %(target)s because no "
                       "SnapMirrors were found for any of the flexvols "
                       "in (%(flexvols)s).")
                payload = {
                    'flexvols': ', '.join(flexvols),
                    'target': target,
                }
                LOG.debug(msg, payload)
                continue

            target_lag_times.append(
                {
                    'target': target,
                    'highest-lag-time': flexvol_mirrors[0]['lag-time'],
                }
            )

        # The best target is one with the least 'worst' lag time.
        best_target = (sorted(target_lag_times,
                              key=lambda x: int(x['highest-lag-time']))[0]
                       if len(target_lag_times) > 0 else {})

        return best_target.get('target')

    def _filter_and_sort_mirrors(self, mirrors, flexvols):
        """Return mirrors reverse-sorted by lag time.

        The 'slowest' mirror determines the best update that occurred on a
        given replication target.
        """
        filtered_mirrors = list(filter(lambda x: x.get('destination-volume')
                                       in flexvols, mirrors))
        sorted_mirrors = sorted(filtered_mirrors,
                                key=lambda x: int(x.get('lag-time')),
                                reverse=True)

        return sorted_mirrors
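
A small worked example of the two helpers together, with hypothetical lag times in seconds: reverse-sorting puts each target's worst lag at index 0, and _choose_failover_target keeps the target whose worst lag is smallest.

    mirrors = [{'destination-volume': 'volume1', 'lag-time': '300'},
               {'destination-volume': 'volume2', 'lag-time': '600'}]
    self._filter_and_sort_mirrors(mirrors, ['volume1', 'volume2'])
    # -> [{'destination-volume': 'volume2', 'lag-time': '600'},
    #     {'destination-volume': 'volume1', 'lag-time': '300'}]
    # If a second target's worst lag were 3600, this target (600) would win.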

    def _complete_failover(self, source_backend_name, replication_targets,
                           flexvols, volumes, failover_target=None):
        """Failover a backend to a secondary replication target."""
        volume_updates = []

        active_backend_name = failover_target or self._choose_failover_target(
            source_backend_name, flexvols, replication_targets)

        if active_backend_name is None:
            msg = _("No suitable host was found to failover.")
            raise exception.NetAppDriverException(msg)

        source_backend_config = config_utils.get_backend_configuration(
            source_backend_name)

        # 1. Start an update to try to get a last minute transfer before we
        # quiesce and break
        self.update_snapmirrors(source_backend_config, source_backend_name,
                                flexvols)
        # 2. Break SnapMirrors
        failed_to_break = self.break_snapmirrors(source_backend_config,
                                                 source_backend_name,
                                                 flexvols, active_backend_name)

        # 3. Update cinder volumes within this host
        for volume in volumes:
            replication_status = fields.ReplicationStatus.FAILED_OVER
            volume_pool = volume_utils.extract_host(volume['host'],
                                                    level='pool')
            if volume_pool in failed_to_break:
                replication_status = 'error'

            volume_update = {
                'volume_id': volume['id'],
                'updates': {
                    'replication_status': replication_status,
                },
            }
            volume_updates.append(volume_update)

        return active_backend_name, volume_updates

    def _failover_host(self, volumes, secondary_id=None):

        if secondary_id == self.backend_name:
            msg = _("Cannot failover to the same host as the primary.")
            raise exception.InvalidReplicationTarget(reason=msg)

        replication_targets = self.get_replication_backend_names(
            self.configuration)

        if not replication_targets:
            msg = _("No replication targets configured for backend "
                    "%s. Cannot failover.")
            raise exception.InvalidReplicationTarget(reason=msg % self.host)
        elif secondary_id and secondary_id not in replication_targets:
            msg = _("%(target)s is not among replication targets configured "
                    "for back end %(host)s. Cannot failover.")
            payload = {
                'target': secondary_id,
                'host': self.host,
            }
            raise exception.InvalidReplicationTarget(reason=msg % payload)

        flexvols = self.ssc_library.get_ssc_flexvol_names()

        try:
            active_backend_name, volume_updates = self._complete_failover(
                self.backend_name, replication_targets, flexvols, volumes,
                failover_target=secondary_id)
        except exception.NetAppDriverException as e:
            msg = _("Could not complete failover: %s") % e
            raise exception.UnableToFailOver(reason=msg)

        # Update the ZAPI client to the backend we failed over to
        self._update_zapi_client(active_backend_name)

        self.failed_over = True
        self.failed_over_backend_name = active_backend_name

        return active_backend_name, volume_updates
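
A condensed sketch of the guard rails, with hypothetical names; failing over to the primary itself or to an unconfigured target raises InvalidReplicationTarget before any SnapMirror is touched:

    # Hypothetical: backend 'backend_a' replicates only to 'target_1'.
    self._failover_host(volumes, secondary_id='backend_a')  # InvalidReplicationTarget
    self._failover_host(volumes, secondary_id='target_9')   # InvalidReplicationTarget
    self._failover_host(volumes, secondary_id='target_1')   # breaks mirrors, then
                                                            # returns ('target_1',
                                                            #          volume_updates)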

cinder/volume/drivers/netapp/dataontap/utils/utils.py (new file, 74 lines)
@@ -0,0 +1,74 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities for NetApp FAS drivers.

This module contains common utilities to be used by one or more
NetApp FAS drivers to achieve the desired functionality.
"""

from oslo_config import cfg
from oslo_log import log

from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp import options as na_opts

LOG = log.getLogger(__name__)
CONF = cfg.CONF


def get_backend_configuration(backend_name):
    """Get a cDOT configuration object for a specific backend."""

    config_stanzas = CONF.list_all_sections()
    if backend_name not in config_stanzas:
        msg = _("Could not find backend stanza %(backend_name)s in "
                "configuration. Available stanzas are %(stanzas)s")
        params = {
            "stanzas": config_stanzas,
            "backend_name": backend_name,
        }
        raise exception.ConfigNotFound(message=msg % params)

    config = configuration.Configuration(driver.volume_opts,
                                         config_group=backend_name)
    config.append_config_values(na_opts.netapp_proxy_opts)
    config.append_config_values(na_opts.netapp_connection_opts)
    config.append_config_values(na_opts.netapp_transport_opts)
    config.append_config_values(na_opts.netapp_basicauth_opts)
    config.append_config_values(na_opts.netapp_provisioning_opts)
    config.append_config_values(na_opts.netapp_cluster_opts)
    config.append_config_values(na_opts.netapp_san_opts)
    config.append_config_values(na_opts.netapp_replication_opts)

    return config


def get_client_for_backend(backend_name, vserver_name=None):
    """Get a cDOT API client for a specific backend."""

    config = get_backend_configuration(backend_name)
    client = client_cmode.Client(
        transport_type=config.netapp_transport_type,
        username=config.netapp_login,
        password=config.netapp_password,
        hostname=config.netapp_server_hostname,
        port=config.netapp_server_port,
        vserver=vserver_name or config.netapp_vserver,
        trace=utils.TRACE_API)

    return client
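
A usage sketch, assuming a cinder.conf stanza named 'backend_a' exists (the names are hypothetical):

    # Client bound to the stanza's own vserver:
    client = get_client_for_backend('backend_a')
    # Or override the vserver, as the data motion code does when talking
    # to a SnapMirror destination:
    dest_client = get_client_for_backend('backend_a', vserver_name='vserver_b')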

@@ -25,6 +25,7 @@ place to ensure re usability and better management of configuration options.
"""

from oslo_config import cfg
from oslo_config import types

NETAPP_SIZE_MULTIPLIER_DEFAULT = 1.2

@@ -187,6 +188,30 @@ netapp_san_opts = [
                  'is only utilized when the storage protocol is '
                  'configured to use iSCSI or FC.')), ]

netapp_replication_opts = [
    cfg.MultiOpt('netapp_replication_aggregate_map',
                 item_type=types.Dict(),
                 help="Multi opt of dictionaries to represent the aggregate "
                      "mapping between source and destination back ends when "
                      "using whole back end replication. For every "
                      "source aggregate associated with a cinder pool (NetApp "
                      "FlexVol), you would need to specify the destination "
                      "aggregate on the replication target device. A "
                      "replication target device is configured with the "
                      "configuration option replication_device. Specify this "
                      "option as many times as you have replication devices. "
                      "Each entry takes the standard dict config form: "
                      "netapp_replication_aggregate_map = "
                      "backend_id:<name_of_replication_device_section>,"
                      "src_aggr_name1:dest_aggr_name1,"
                      "src_aggr_name2:dest_aggr_name2,..."),
    cfg.IntOpt('netapp_snapmirror_quiesce_timeout',
               min=0,
               default=3600,  # One Hour
               help='The maximum time in seconds to wait for existing '
                    'SnapMirror transfers to complete before aborting '
                    'during a failover.'), ]
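
Taken together with replication_device, a back end stanza replicating to one target might look roughly like this (the section, aggregate, and timeout values are hypothetical):

    [backend_a]
    ...
    replication_device = backend_id:target_1
    netapp_replication_aggregate_map = backend_id:target_1,aggr1:aggr10,aggr2:aggr20
    netapp_snapmirror_quiesce_timeout = 3600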

CONF = cfg.CONF
CONF.register_opts(netapp_proxy_opts)
CONF.register_opts(netapp_connection_opts)

@@ -199,3 +224,4 @@ CONF.register_opts(netapp_img_cache_opts)
CONF.register_opts(netapp_eseries_opts)
CONF.register_opts(netapp_nfs_extra_opts)
CONF.register_opts(netapp_san_opts)
CONF.register_opts(netapp_replication_opts)
releasenotes/notes/netapp-cDOT-whole-backend-replication-support-59d7537fe3d0eb05.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
---
features:
  - Added host-level (whole back end replication - v2.1) replication support
    to the NetApp cDOT drivers (iSCSI, FC, NFS).
upgrade:
  - While configuring NetApp cDOT back ends, new configuration options
    ('replication_device' and 'netapp_replication_aggregate_map') must be
    added in order to use the host-level failover feature.
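
As a usage note (a hedged sketch; the type name is hypothetical), replicated volumes are normally requested through a volume type carrying the standard replication extra spec, so the scheduler places them on a replication-enabled back end:

    cinder type-create replicated
    cinder type-key replicated set replication_enabled='<is> True'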