From a6f48a55eb362b8236d9b11cbd961f28aa6fe1ba Mon Sep 17 00:00:00 2001 From: Soffie Huang Date: Mon, 20 Jun 2016 17:27:26 +0800 Subject: [PATCH] FalconStor: New Cinder driver in Newton This driver supports the following APIs: - Volume Create/Delete - Volume Attach/Detach - Snapshot Create/Delete - Create Volume from Snapshot - Get Volume Stats - Clone Volume - Extend Volume - Consistency Group Create/Delete/Update - Consistency Group Snapshot Create/Delete - Manage/Unmanage existing volume DocImpact Change-Id: Iea3ff7f1b8a055979da10d2d484c8a2ba0c48bac Implements: blueprint falconstor-freestor-cinder-driver --- cinder/opts.py | 5 +- cinder/tests/unit/test_falconstor_fss.py | 895 ++++++++++ cinder/volume/drivers/falconstor/__init__.py | 0 cinder/volume/drivers/falconstor/fc.py | 110 ++ .../volume/drivers/falconstor/fss_common.py | 399 +++++ cinder/volume/drivers/falconstor/iscsi.py | 102 ++ .../volume/drivers/falconstor/rest_proxy.py | 1530 +++++++++++++++++ ...onstor-cinder-driver-dcb61441cd7601c5.yaml | 4 + 8 files changed, 3044 insertions(+), 1 deletion(-) create mode 100644 cinder/tests/unit/test_falconstor_fss.py create mode 100644 cinder/volume/drivers/falconstor/__init__.py create mode 100644 cinder/volume/drivers/falconstor/fc.py create mode 100644 cinder/volume/drivers/falconstor/fss_common.py create mode 100644 cinder/volume/drivers/falconstor/iscsi.py create mode 100644 cinder/volume/drivers/falconstor/rest_proxy.py create mode 100644 releasenotes/notes/falconstor-cinder-driver-dcb61441cd7601c5.yaml diff --git a/cinder/opts.py b/cinder/opts.py index 64c6ca5f2db..c4c0c46c245 100644 --- a/cinder/opts.py +++ b/cinder/opts.py @@ -85,6 +85,8 @@ from cinder.volume.drivers.emc import scaleio as \ from cinder.volume.drivers.emc import xtremio as \ cinder_volume_drivers_emc_xtremio from cinder.volume.drivers import eqlx as cinder_volume_drivers_eqlx +from cinder.volume.drivers.falconstor import fss_common as \ + cinder_volume_drivers_falconstor_fsscommon from 
cinder.volume.drivers.fujitsu import eternus_dx_common as \ cinder_volume_drivers_fujitsu_eternusdxcommon from cinder.volume.drivers import glusterfs as cinder_volume_drivers_glusterfs @@ -182,8 +184,8 @@ def list_opts(): return [ ('FC-ZONE-MANAGER', itertools.chain( - cinder_zonemanager_fczonemanager.zone_manager_opts, cinder_zonemanager_drivers_brocade_brcdfczonedriver.brcd_opts, + cinder_zonemanager_fczonemanager.zone_manager_opts, cinder_zonemanager_drivers_cisco_ciscofczonedriver.cisco_opts, )), ('KEYMGR', @@ -269,6 +271,7 @@ def list_opts(): cinder_volume_drivers_xio.XIO_OPTS, cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc. storwize_svc_fc_opts, + cinder_volume_drivers_falconstor_fsscommon.FSS_OPTS, cinder_volume_drivers_zfssa_zfssaiscsi.ZFSSA_OPTS, cinder_volume_driver.volume_opts, cinder_volume_driver.iser_opts, diff --git a/cinder/tests/unit/test_falconstor_fss.py b/cinder/tests/unit/test_falconstor_fss.py new file mode 100644 index 00000000000..ad72a0ffb29 --- /dev/null +++ b/cinder/tests/unit/test_falconstor_fss.py @@ -0,0 +1,895 @@ +# Copyright (c) 2016 FalconStor, Inc. +# All Rights Reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from copy import deepcopy +import mock +import time + +from cinder import context +from cinder import exception +from cinder import test +from cinder.volume import configuration as conf +from cinder.volume.drivers.falconstor import fc +from cinder.volume.drivers.falconstor import iscsi +from cinder.volume.drivers.falconstor import rest_proxy as proxy + + +DRIVER_PATH = "cinder.volume.drivers.falconstor" +BASE_DRIVER = DRIVER_PATH + ".fss_common.FalconstorBaseDriver" +ISCSI_DRIVER = DRIVER_PATH + ".iscsi.FSSISCSIDriver" + +PRIMARY_IP = '10.0.0.1' +SECONDARY_IP = '10.0.0.2' +FAKE_ID = 123 +FAKE = 'fake' +FAKE_HOST = 'fakehost' +API_RESPONSE = {'rc': 0} +ISCSI_VOLUME_BACKEND_NAME = "FSSISCSIDriver" +SESSION_ID = "a76d506c-abcd-1234-efgh-710e1fd90527" +VOLUME_ID = '6068ea6d-f221-4213-bde9-f1b50aecdf36' +ADD_VOLUME_ID = '6068ed7f-f231-4283-bge9-f1b51aecdf36' +GROUP_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7' + +PORTAL_RESPONSE = {'rc': 0, 'ipaddress': FAKE} +VOLUME_METADATA = {'metadata': {'FSS-vid': 1}} +EXTENT_NEW_SIZE = 3 +DATA_SERVER_INFO = 0, {'metadata': {'vendor': 'FalconStor', 'version': '1.5'}} + +FSS_SINGLE_TYPE = 'single' +RAWTIMESTAMP = '1324975390' + +VOLUME = {'id': VOLUME_ID, + 'name': "volume-" + VOLUME_ID, + 'display_name': 'fake_volume', + 'display_description': '', + 'size': 1, + 'host': "hostname@backend#%s" % FAKE_ID, + 'volume_type': None, + 'volume_type_id': None, + 'consistencygroup_id': None, + 'volume_metadata': [], + 'metadata': {"Type": "work"}} + +SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc" +SRC_VOL = { + "name": "volume-" + SRC_VOL_ID, + "id": SRC_VOL_ID, + "display_name": "fake_src_vol", + "size": 1, + "host": "hostname@backend#%s" % FAKE_ID, + "volume_type": None, + "volume_type_id": None, + "volume_size": 1 +} + +VOLUME_NAME = 'cinder-' + VOLUME['id'] +SRC_VOL_NAME = 'cinder-' + SRC_VOL['id'] +DATA_OUTPUT = VOLUME_NAME, VOLUME_METADATA +SNAPSHOT_METADATA = {'fss-tm-comment': None} + +ADD_VOLUME_IN_CG = { + 'id': ADD_VOLUME_ID, 
+ 'display_name': 'abc123', + 'display_description': '', + 'size': 1, + 'consistencygroup_id': GROUP_ID, + 'status': 'available', + 'host': "hostname@backend#%s" % FAKE_ID} + +REMOVE_VOLUME_IN_CG = { + 'id': 'fe2dbc515810451dab2f8c8a48d15bee', + 'display_name': 'fe2dbc515810451dab2f8c8a48d15bee', + 'display_description': '', + 'size': 1, + 'consistencygroup_id': GROUP_ID, + 'status': 'available', + 'host': "hostname@backend#%s" % FAKE_ID} + +CONSISTGROUP = {'id': GROUP_ID, + 'name': 'fake_group', + 'description': 'fake_group_des', + 'status': ''} +CG_SNAPSHOT = { + 'consistencygroup_id': GROUP_ID, + 'id': '3c61b0f9-842e-46bf-b061-5e0031d8083f', + 'name': 'cgsnapshot1', + 'description': 'cgsnapshot1', + 'status': ''} + +SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb" +SNAPSHOT = {'name': "snapshot-" + SNAPSHOT_ID, + 'id': SNAPSHOT_ID, + 'volume_id': VOLUME_ID, + 'volume_name': "volume-" + VOLUME_ID, + 'volume_size': 2, + 'display_name': "fake_snapshot", + 'display_description': '', + 'volume': VOLUME, + 'metadata': SNAPSHOT_METADATA, + 'status': ''} + +INITIATOR_IQN = 'iqn.2015-08.org.falconstor:01:fss' +TARGET_IQN = "iqn.2015-06.com.falconstor:freestor.fss-12345abc" +TARGET_PORT = "3260" +ISCSI_PORT_NAMES = ["ct0.eth2", "ct0.eth3", "ct1.eth2", "ct1.eth3"] +ISCSI_IPS = ["10.0.0." 
+ str(i + 1) for i in range(len(ISCSI_PORT_NAMES))] + +ISCSI_PORTS = {"iqn": TARGET_IQN, "lun": 1} +ISCSI_CONNECTOR = {'initiator': INITIATOR_IQN, + 'host': "hostname@backend#%s" % FAKE_ID} +ISCSI_INFO = { + 'driver_volume_type': 'iscsi', + 'data': { + 'target_discovered': True, + 'discard': True, + 'encrypted': False, + 'qos_specs': None, + 'access_mode': 'rw', + 'volume_id': VOLUME_ID, + 'target_iqn': ISCSI_PORTS['iqn'], + 'target_portal': ISCSI_IPS[0] + ':' + TARGET_PORT, + 'target_lun': 1 + }, +} + +ISCSI_MULTIPATH_INFO = { + 'driver_volume_type': 'iscsi', + 'data': { + 'target_discovered': False, + 'discard': True, + 'encrypted': False, + 'qos_specs': None, + 'access_mode': 'rw', + 'volume_id': VOLUME_ID, + 'target_iqns': [ISCSI_PORTS['iqn']], + 'target_portals': [ISCSI_IPS[0] + ':' + TARGET_PORT], + 'target_luns': [1] + }, +} + +FC_INITIATOR_WWPNS = ['2100000d778301c3', '2101000d77a301c3'] +FC_TARGET_WWPNS = ['11000024ff2d2ca4', '11000024ff2d2ca5', + '11000024ff2d2c23', '11000024ff2d2c24'] +FC_WWNS = ['20000024ff2d2ca4', '20000024ff2d2ca5', + '20000024ff2d2c23', '20000024ff2d2c24'] +FC_CONNECTOR = {'ip': '10.10.0.1', + 'initiator': 'iqn.1988-08.org.oracle:568eb4ccbbcc', + 'wwpns': FC_INITIATOR_WWPNS, + 'wwnns': FC_WWNS, + 'host': FAKE_HOST, + 'multipath': False} +FC_INITIATOR_TARGET_MAP = { + FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]], + FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]] +} +FC_DEVICE_MAPPING = { + "fabric": { + 'initiator_port_wwn_list': FC_INITIATOR_WWPNS, + 'target_port_wwn_list': FC_WWNS + } +} + +FC_INFO = { + 'driver_volume_type': 'fibre_channel', + 'data': { + 'target_discovered': True, + 'volume_id': VOLUME_ID, + 'target_lun': 1, + 'target_wwn': FC_TARGET_WWPNS, + 'initiator_target_map': FC_INITIATOR_TARGET_MAP + } +} + + +def Fake_sleep(time): + pass + + +class FSSDriverTestCase(test.TestCase): + + def setUp(self): + super(FSSDriverTestCase, self).setUp() + self.mock_config = mock.Mock() + 
self.mock_config.san_ip = PRIMARY_IP + self.mock_config.san_login = FAKE + self.mock_config.san_password = FAKE + self.mock_config.fss_pool = FAKE_ID + self.mock_config.san_is_local = False + self.mock_config.fss_debug = False + self.mock_config.additional_retry_list = False + self.stubs.Set(time, 'sleep', Fake_sleep) + + +class TestFSSISCSIDriver(FSSDriverTestCase): + def __init__(self, method): + super(TestFSSISCSIDriver, self).__init__(method) + + def setUp(self): + super(TestFSSISCSIDriver, self).setUp() + self.mock_config.use_chap_auth = False + self.mock_config.use_multipath_for_image_xfer = False + self.mock_config.volume_backend_name = ISCSI_VOLUME_BACKEND_NAME + self.driver = iscsi.FSSISCSIDriver(configuration=self.mock_config) + self.mock_utils = mock.Mock() + self.driver.driver_utils = self.mock_utils + + def tearDown(self): + super(TestFSSISCSIDriver, self).tearDown() + + def test_initialized_should_set_fss_info(self): + self.assertEqual(self.driver.proxy.fss_host, + self.driver.configuration.san_ip) + self.assertEqual(self.driver.proxy.fss_username, + self.driver.configuration.san_login) + self.assertEqual(self.driver.proxy.fss_password, + self.driver.configuration.san_password) + self.assertEqual(self.driver.proxy.fss_defined_pool, + self.driver.configuration.fss_pool) + + def test_check_for_setup_error(self): + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.check_for_setup_error) + + @mock.patch.object(proxy.RESTProxy, 'create_vdev', + return_value=DATA_OUTPUT) + def test_create_volume(self, mock_create_vdev): + self.driver.create_volume(VOLUME) + mock_create_vdev.assert_called_once_with(VOLUME) + + @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name', + return_value=VOLUME_NAME) + def test_extend_volume(self, mock__get_fss_volume_name): + """Volume extended_volume successfully.""" + self.driver.proxy.extend_vdev = mock.Mock() + result = self.driver.extend_volume(VOLUME, EXTENT_NEW_SIZE) + 
mock__get_fss_volume_name.assert_called_once_with(VOLUME) + self.driver.proxy.extend_vdev.assert_called_once_with(VOLUME_NAME, + VOLUME["size"], + EXTENT_NEW_SIZE) + self.assertIsNone(result) + + @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name') + def test_clone_volume(self, mock__get_fss_volume_name): + mock__get_fss_volume_name.side_effect = [VOLUME_NAME, SRC_VOL_NAME] + self.driver.proxy.clone_volume = mock.Mock( + return_value=VOLUME_METADATA) + self.driver.proxy.extend_vdev = mock.Mock() + + self.driver.create_cloned_volume(VOLUME, SRC_VOL) + self.driver.proxy.clone_volume.assert_called_with(VOLUME_NAME, + SRC_VOL_NAME) + + mock__get_fss_volume_name.assert_any_call(VOLUME) + mock__get_fss_volume_name.assert_any_call(SRC_VOL) + self.assertEqual(2, mock__get_fss_volume_name.call_count) + + self.driver.proxy.extend_vdev(VOLUME_NAME, VOLUME["size"], + SRC_VOL["size"]) + self.driver.proxy.extend_vdev.assert_called_with(VOLUME_NAME, + VOLUME["size"], + SRC_VOL["size"]) + + @mock.patch.object(proxy.RESTProxy, 'delete_vdev') + def test_delete_volume(self, mock_delete_vdev): + result = self.driver.delete_volume(VOLUME) + mock_delete_vdev.assert_called_once_with(VOLUME) + self.assertIsNone(result) + + @mock.patch.object(proxy.RESTProxy, 'create_snapshot', + return_value=API_RESPONSE) + def test_create_snapshot(self, mock_create_snapshot): + snap_name = SNAPSHOT.get('display_name') + SNAPSHOT_METADATA["fss-tm-comment"] = snap_name + result = self.driver.create_snapshot(SNAPSHOT) + mock_create_snapshot.assert_called_once_with(SNAPSHOT) + self.assertEqual(result, {'metadata': SNAPSHOT_METADATA}) + + @mock.patch.object(proxy.RESTProxy, 'delete_snapshot', + return_value=API_RESPONSE) + def test_delete_snapshot(self, mock_delete_snapshot): + result = self.driver.delete_snapshot(SNAPSHOT) + mock_delete_snapshot.assert_called_once_with(SNAPSHOT) + self.assertIsNone(result) + + @mock.patch.object(proxy.RESTProxy, 'create_volume_from_snapshot', + 
return_value=(VOLUME_NAME, VOLUME_METADATA)) + @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name', + return_value=VOLUME_NAME) + def test_create_volume_from_snapshot(self, mock__get_fss_volume_name, + mock_create_volume_from_snapshot): + vol_size = VOLUME['size'] + snap_size = SNAPSHOT['volume_size'] + self.driver.proxy.extend_vdev = mock.Mock() + + self.assertEqual( + self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT), + dict(metadata=VOLUME_METADATA)) + mock_create_volume_from_snapshot.assert_called_once_with(VOLUME, + SNAPSHOT) + + if vol_size != snap_size: + mock__get_fss_volume_name.assert_called_once_with(VOLUME) + self.driver.proxy.extend_vdev(VOLUME_NAME, snap_size, vol_size) + self.driver.proxy.extend_vdev.assert_called_with(VOLUME_NAME, + snap_size, + vol_size) + + @mock.patch.object(proxy.RESTProxy, 'create_group') + def test_create_consistency_group(self, mock_create_group): + ctxt = context.get_admin_context() + model_update = self.driver.create_consistencygroup(ctxt, CONSISTGROUP) + mock_create_group.assert_called_once_with(CONSISTGROUP) + self.assertDictMatch({'status': 'available'}, model_update) + + @mock.patch.object(proxy.RESTProxy, 'destroy_group') + @mock.patch(BASE_DRIVER + ".delete_volume", autospec=True) + def test_delete_consistency_group(self, mock_delete_vdev, + mock_destroy_group): + mock_cgroup = mock.MagicMock() + mock_cgroup.id = FAKE_ID + mock_cgroup['status'] = "deleted" + mock_context = mock.Mock() + mock_volume = mock.MagicMock() + expected_volume_updates = [{ + 'id': mock_volume.id, + 'status': 'deleted' + }] + model_update, volumes = self.driver.delete_consistencygroup( + mock_context, mock_cgroup, [mock_volume]) + + mock_destroy_group.assert_called_with(mock_cgroup) + self.assertEqual(expected_volume_updates, volumes) + self.assertEqual(mock_cgroup['status'], model_update['status']) + mock_delete_vdev.assert_called_with(self.driver, mock_volume) + + @mock.patch.object(proxy.RESTProxy, 'set_group') + def 
test_update_consistency_group(self, mock_set_group): + ctxt = context.get_admin_context() + add_vols = [ + {'name': 'vol1', 'id': 'vol1', 'display_name': ''}, + {'name': 'vol2', 'id': 'vol2', 'display_name': ''} + ] + remove_vols = [ + {'name': 'vol3', 'id': 'vol3', 'display_name': ''}, + {'name': 'vol4', 'id': 'vol4', 'display_name': ''} + ] + + expected_addvollist = ["cinder-%s" % volume['id'] for volume in + add_vols] + expected_remvollist = ["cinder-%s" % vol['id'] for vol in remove_vols] + + self.driver.update_consistencygroup(ctxt, CONSISTGROUP, + add_volumes=add_vols, + remove_volumes=remove_vols) + mock_set_group.assert_called_with(GROUP_ID, + addvollist=expected_addvollist, + remvollist=expected_remvollist) + + @mock.patch.object(proxy.RESTProxy, 'create_cgsnapshot') + def test_create_cgsnapshot(self, mock_create_cgsnapshot): + mock_cgsnap = CG_SNAPSHOT + mock_context = mock.Mock() + mock_snap = mock.MagicMock() + model_update, snapshots = self.driver.create_cgsnapshot(mock_context, + mock_cgsnap, + [mock_snap]) + mock_create_cgsnapshot.assert_called_once_with(mock_cgsnap) + self.assertEqual({'status': 'available'}, model_update) + expected_snapshot_update = [{ + 'id': mock_snap.id, + 'status': 'available' + }] + self.assertEqual(expected_snapshot_update, snapshots) + + @mock.patch.object(proxy.RESTProxy, 'delete_cgsnapshot') + def test_delete_cgsnapshot(self, mock_delete_cgsnapshot): + mock_cgsnap = mock.Mock() + mock_cgsnap.id = FAKE_ID + mock_cgsnap.status = 'deleted' + mock_context = mock.Mock() + mock_snap = mock.MagicMock() + + model_update, snapshots = self.driver.delete_cgsnapshot(mock_context, + mock_cgsnap, + [mock_snap]) + mock_delete_cgsnapshot.assert_called_once_with(mock_cgsnap) + self.assertEqual({'status': mock_cgsnap.status}, model_update) + + expected_snapshot_update = [dict(id=mock_snap.id, status='deleted')] + self.assertEqual(expected_snapshot_update, snapshots) + + @mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi', + 
return_value=ISCSI_PORTS) + def test_initialize_connection(self, mock_initialize_connection_iscsi): + FSS_HOSTS = [] + FSS_HOSTS.append(PRIMARY_IP) + ret = self.driver.initialize_connection(VOLUME, ISCSI_CONNECTOR) + mock_initialize_connection_iscsi.assert_called_once_with( + VOLUME, + ISCSI_CONNECTOR, + FSS_HOSTS) + result = deepcopy(ISCSI_INFO) + self.assertDictMatch(result, ret) + + @mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi') + @mock.patch(ISCSI_DRIVER + "._check_multipath", autospec=True) + def test_initialize_connection_multipath(self, mock__check_multipath, + mock_initialize_connection_iscsi): + fss_hosts = [] + fss_hosts.append(self.mock_config.san_ip) + mock_initialize_connection_iscsi.return_value = ISCSI_PORTS + mock__check_multipath.return_value = True + + self.mock_config.use_multipath_for_image_xfer = True + self.mock_config.san_secondary_ip = SECONDARY_IP + multipath_connector = deepcopy(ISCSI_CONNECTOR) + multipath_connector["multipath"] = True + fss_hosts.append(SECONDARY_IP) + + self.driver.initialize_connection(VOLUME, multipath_connector) + mock_initialize_connection_iscsi.assert_called_once_with( + VOLUME, + multipath_connector, + fss_hosts) + + @mock.patch.object(proxy.RESTProxy, 'terminate_connection_iscsi') + def test_terminate_connection(self, mock_terminate_connection_iscsi): + self.driver.terminate_connection(VOLUME, ISCSI_CONNECTOR) + mock_terminate_connection_iscsi.assert_called_once_with( + VOLUME, + ISCSI_CONNECTOR) + + @mock.patch.object(proxy.RESTProxy, '_manage_existing_volume') + @mock.patch.object(proxy.RESTProxy, '_get_existing_volume_ref_vid') + def test_manage_existing(self, mock__get_existing_volume_ref_vid, + mock__manage_existing_volume): + ref_vid = 1 + volume_ref = {'source-id': ref_vid} + self.driver.manage_existing(VOLUME, volume_ref) + mock__get_existing_volume_ref_vid.assert_called_once_with(volume_ref) + mock__manage_existing_volume.assert_called_once_with( + volume_ref['source-id'], VOLUME) + + 
@mock.patch.object(proxy.RESTProxy, '_get_existing_volume_ref_vid', + return_value=5120) + def test_manage_existing_get_size(self, mock__get_existing_volume_ref_vid): + ref_vid = 1 + volume_ref = {'source-id': ref_vid} + expected_size = 5 + size = self.driver.manage_existing_get_size(VOLUME, volume_ref) + mock__get_existing_volume_ref_vid.assert_called_once_with(volume_ref) + self.assertEqual(expected_size, size) + + @mock.patch.object(proxy.RESTProxy, 'unmanage') + def test_unmanage(self, mock_unmanage): + self.driver.unmanage(VOLUME) + mock_unmanage.assert_called_once_with(VOLUME) + + +class TestFSSFCDriver(FSSDriverTestCase): + + def setUp(self): + super(TestFSSFCDriver, self).setUp() + self.driver = fc.FSSFCDriver(configuration=self.mock_config) + self.driver._lookup_service = mock.Mock() + + @mock.patch.object(proxy.RESTProxy, 'fc_initialize_connection') + def test_initialize_connection(self, mock_fc_initialize_connection): + fss_hosts = [] + fss_hosts.append(PRIMARY_IP) + self.driver.initialize_connection(VOLUME, FC_CONNECTOR) + mock_fc_initialize_connection.assert_called_once_with( + VOLUME, + FC_CONNECTOR, + fss_hosts) + + @mock.patch.object(proxy.RESTProxy, '_check_fc_host_devices_empty', + return_value=False) + @mock.patch.object(proxy.RESTProxy, 'fc_terminate_connection', + return_value=FAKE_ID) + def test_terminate_connection(self, mock_fc_terminate_connection, + mock__check_fc_host_devices_empty): + self.driver.terminate_connection(VOLUME, FC_CONNECTOR) + mock_fc_terminate_connection.assert_called_once_with( + VOLUME, + FC_CONNECTOR) + mock__check_fc_host_devices_empty.assert_called_once_with(FAKE_ID) + + +class TestRESTProxy(test.TestCase): + """Test REST Proxy Driver.""" + + def setUp(self): + super(TestRESTProxy, self).setUp() + configuration = mock.Mock(conf.Configuration) + configuration.san_ip = FAKE + configuration.san_login = FAKE + configuration.san_password = FAKE + configuration.fss_pool = FAKE_ID + configuration.fss_debug = False + 
configuration.additional_retry_list = None + + self.proxy = proxy.RESTProxy(configuration) + self.FSS_MOCK = mock.MagicMock() + self.proxy.FSS = self.FSS_MOCK + self.FSS_MOCK._fss_request.return_value = API_RESPONSE + self.stubs.Set(time, 'sleep', Fake_sleep) + + def tearDown(self): + super(TestRESTProxy, self).tearDown() + + def test_do_setup(self): + self.proxy.do_setup() + self.FSS_MOCK.fss_login.assert_called_once_with() + self.assertNotEqual(self.proxy.session_id, SESSION_ID) + + def test_create_volume(self): + sizemb = self.proxy._convert_size_to_mb(VOLUME['size']) + volume_name = self.proxy._get_fss_volume_name(VOLUME) + + params = dict(storagepoolid=self.proxy.fss_defined_pool, + sizemb=sizemb, + category="virtual", + name=volume_name) + self.proxy.create_vdev(VOLUME) + self.FSS_MOCK.create_vdev.assert_called_once_with(params) + + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', + return_value=FAKE_ID) + def test_extend_volume(self, mock__get_fss_vid_from_name): + size = self.proxy._convert_size_to_mb(EXTENT_NEW_SIZE - VOLUME['size']) + params = dict( + action='expand', + sizemb=size + ) + volume_name = self.proxy._get_fss_volume_name(VOLUME) + self.proxy.extend_vdev(volume_name, VOLUME["size"], EXTENT_NEW_SIZE) + + mock__get_fss_vid_from_name.assert_called_once_with(volume_name, + FSS_SINGLE_TYPE) + self.FSS_MOCK.extend_vdev.assert_called_once_with(FAKE_ID, params) + + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', + return_value=FAKE_ID) + def test_delete_volume(self, mock__get_fss_vid_from_name): + volume_name = self.proxy._get_fss_volume_name(VOLUME) + self.proxy.delete_vdev(VOLUME) + mock__get_fss_vid_from_name.assert_called_once_with(volume_name, + FSS_SINGLE_TYPE) + self.FSS_MOCK.delete_vdev.assert_called_once_with(FAKE_ID) + + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', + return_value=FAKE_ID) + def test_clone_volume(self, mock__get_fss_vid_from_name): + self.FSS_MOCK.create_mirror.return_value = 
API_RESPONSE + self.FSS_MOCK.sync_mirror.return_value = API_RESPONSE + mirror_params = dict( + category='virtual', + selectioncriteria='anydrive', + mirrortarget="virtual", + storagepoolid=self.proxy.fss_defined_pool + ) + ret = self.proxy.clone_volume(VOLUME_NAME, SRC_VOL_NAME) + + self.FSS_MOCK.create_mirror.assert_called_once_with(FAKE_ID, + mirror_params) + self.FSS_MOCK.sync_mirror.assert_called_once_with(FAKE_ID) + self.FSS_MOCK.promote_mirror.assert_called_once_with(FAKE_ID, + VOLUME_NAME) + self.assertNotEqual(ret, VOLUME_METADATA) + + @mock.patch.object(proxy.RESTProxy, 'create_vdev_snapshot') + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', + return_value=FAKE_ID) + @mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap', + return_value=VOLUME_NAME) + def test_create_snapshot(self, mock__get_vol_name_from_snap, + mock__get_fss_vid_from_name, + mock_create_vdev_snapshot): + self.FSS_MOCK._check_if_snapshot_tm_exist.return_value = [ + False, False, SNAPSHOT['volume_size']] + + self.proxy.create_snapshot(SNAPSHOT) + self.FSS_MOCK._check_if_snapshot_tm_exist.assert_called_once_with( + FAKE_ID) + sizemb = self.proxy._convert_size_to_mb(SNAPSHOT['volume_size']) + mock_create_vdev_snapshot.assert_called_once_with(FAKE_ID, sizemb) + self.FSS_MOCK.create_timemark_policy.assert_called_once_with( + FAKE_ID, + storagepoolid=self.proxy.fss_defined_pool) + self.FSS_MOCK.create_timemark.assert_called_once_with( + FAKE_ID, + SNAPSHOT["display_name"]) + + @mock.patch.object(proxy.RESTProxy, '_get_timestamp', + return_value=RAWTIMESTAMP) + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name', + return_value=FAKE_ID) + @mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap', + return_value=VOLUME_NAME) + def test_delete_snapshot(self, mock__get_vol_name_from_snap, + mock__get_fss_vid_from_name, + mock__get_timestamp): + timestamp = '%s_%s' % (FAKE_ID, RAWTIMESTAMP) + + self.proxy.delete_snapshot(SNAPSHOT) + 
mock__get_vol_name_from_snap.assert_called_once_with(SNAPSHOT) + self.FSS_MOCK.delete_timemark.assert_called_once_with(timestamp) + self.FSS_MOCK.get_timemark.assert_any_call(FAKE_ID) + self.assertEqual(2, self.FSS_MOCK.get_timemark.call_count) + + @mock.patch.object(proxy.RESTProxy, '_get_timestamp') + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_vol_name_from_snap') + def test_create_volume_from_snapshot(self, mock__get_vol_name_from_snap, + mock__get_fss_vid_from_name, + mock__get_timestamp): + tm_info = {"rc": 0, + "data": + { + "guid": "497bad5e-e589-bb0a-e0e7-00004eeac169", + "name": "SANDisk-001", + "total": "1", + "timemark": [ + { + "size": 131072, + "comment": "123test456", + "hastimeview": False, + "priority": "low", + "quiescent": "yes", + "timeviewdata": "notkept", + "rawtimestamp": "1324975390", + "timestamp": "2015-10-11 16:43:10" + }] + } + } + mock__get_vol_name_from_snap.return_value = VOLUME_NAME + new_vol_name = self.proxy._get_fss_volume_name(VOLUME) + mock__get_fss_vid_from_name.return_value = FAKE_ID + + self.FSS_MOCK.get_timemark.return_value = tm_info + mock__get_timestamp.return_value = RAWTIMESTAMP + timestamp = '%s_%s' % (FAKE_ID, RAWTIMESTAMP) + + self.proxy.create_volume_from_snapshot(VOLUME, SNAPSHOT) + self.FSS_MOCK.get_timemark.assert_called_once_with(FAKE_ID) + mock__get_timestamp.assert_called_once_with(tm_info, + SNAPSHOT['display_name']) + self.FSS_MOCK.copy_timemark.assert_called_once_with( + timestamp, + storagepoolid=self.proxy.fss_defined_pool, + name=new_vol_name) + + @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') + def test_create_consistency_group(self, mock__get_group_name_from_id): + + mock__get_group_name_from_id.return_value = CONSISTGROUP['name'] + params = dict(name=CONSISTGROUP['name']) + self.proxy.create_group(CONSISTGROUP) + self.FSS_MOCK.create_group.assert_called_once_with(params) + + @mock.patch.object(proxy.RESTProxy, 
'_get_fss_gid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') + def test_delete_consistency_group(self, mock__get_group_name_from_id, + mock__get_fss_gid_from_name): + mock__get_group_name_from_id.return_value = CONSISTGROUP['name'] + mock__get_fss_gid_from_name.return_value = FAKE_ID + + self.proxy.destroy_group(CONSISTGROUP) + mock__get_group_name_from_id.assert_called_once_with( + CONSISTGROUP['id']) + mock__get_fss_gid_from_name.assert_called_once_with( + CONSISTGROUP['name']) + self.FSS_MOCK.destroy_group.assert_called_once_with(FAKE_ID) + + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') + def test_update_consistency_group(self, mock__get_group_name_from_id, + mock__get_fss_gid_from_name, + mock__get_fss_vid_from_name): + join_vid_list = [1, 2] + leave_vid_list = [3, 4] + mock__get_group_name_from_id.return_value = CONSISTGROUP['name'] + mock__get_fss_gid_from_name.return_value = FAKE_ID + mock__get_fss_vid_from_name.side_effect = [join_vid_list, + leave_vid_list] + add_vols = [ + {'name': 'vol1', 'id': 'vol1'}, + {'name': 'vol2', 'id': 'vol2'} + ] + remove_vols = [ + {'name': 'vol3', 'id': 'vol3'}, + {'name': 'vol4', 'id': 'vol4'} + ] + expected_addvollist = ["cinder-%s" % volume['id'] for volume in + add_vols] + expected_remvollist = ["cinder-%s" % vol['id'] for vol in remove_vols] + + self.proxy.set_group(CONSISTGROUP, addvollist=expected_addvollist, + remvollist=expected_remvollist) + + if expected_addvollist: + mock__get_fss_vid_from_name.assert_any_call(expected_addvollist) + + if expected_remvollist: + mock__get_fss_vid_from_name.assert_any_call(expected_remvollist) + self.assertEqual(2, mock__get_fss_vid_from_name.call_count) + + join_params = dict() + leave_params = dict() + + join_params.update( + action='join', + virtualdevices=join_vid_list + ) + leave_params.update( + 
action='leave', + virtualdevices=leave_vid_list + ) + self.FSS_MOCK.set_group.assert_called_once_with(FAKE_ID, join_params, + leave_params) + + @mock.patch.object(proxy.RESTProxy, 'create_vdev_snapshot') + @mock.patch.object(proxy.RESTProxy, 'create_group_timemark') + @mock.patch.object(proxy.RESTProxy, '_get_vdev_id_from_group_id') + @mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') + def test_create_cgsnapshot(self, mock__get_group_name_from_id, + mock__get_fss_gid_from_name, + mock__get_vdev_id_from_group_id, + mock_create_group_timemark, + mock_create_vdev_snapshot + ): + vid_list = [1] + + group_name = "cinder-consisgroup-%s" % CG_SNAPSHOT[ + 'consistencygroup_id'] + mock__get_group_name_from_id.return_value = group_name + mock__get_fss_gid_from_name.return_value = FAKE_ID + mock__get_vdev_id_from_group_id.return_value = vid_list + gsnap_name = self.proxy._encode_name(CG_SNAPSHOT['id']) + self.FSS_MOCK._check_if_snapshot_tm_exist.return_value = ( + False, + False, + 1024) + + self.proxy.create_cgsnapshot(CG_SNAPSHOT) + mock__get_group_name_from_id.assert_called_once_with( + CG_SNAPSHOT['consistencygroup_id']) + mock__get_fss_gid_from_name.assert_called_once_with(group_name) + mock__get_vdev_id_from_group_id.assert_called_once_with(FAKE_ID) + + for vid in vid_list: + self.FSS_MOCK._check_if_snapshot_tm_exist.assert_called_with(vid) + mock_create_vdev_snapshot.assert_called_once_with(vid, 1024) + self.FSS_MOCK.create_timemark_policy.assert_called_once_with( + vid, + storagepoolid=self.proxy.fss_defined_pool) + + mock_create_group_timemark.assert_called_once_with(FAKE_ID, gsnap_name) + + @mock.patch.object(proxy.RESTProxy, 'delete_group_timemark') + @mock.patch.object(proxy.RESTProxy, '_get_fss_group_membercount') + @mock.patch.object(proxy.RESTProxy, '_get_fss_gid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_group_name_from_id') + def test_delete_cgsnapshot(self, 
mock__get_group_name_from_id, + mock__get_fss_gid_from_name, + mock__get_fss_group_membercount, + mock_delete_group_timemark): + tm_info = { + "rc": 0, + "data": + { + "name": "GroupTestABC", + "total": 1, + "timemark": [{ + "size": 65536, + "comment": "cinder-PGGwaaaaaaaar+wYV4AMdgIPw", + "priority": "low", + "quiescent": "yes", + "hastimeview": "false", + "timeviewdata": "notkept", + "rawtimestamp": "1324974940", + "timestamp": "2015-10-15 16:35:40"}] + } + } + final_tm_data = { + "rc": 0, + "data": + {"name": "GroupTestABC", + "total": 1, + "timemark": [] + }} + + mock__get_group_name_from_id.return_value = CG_SNAPSHOT[ + 'consistencygroup_id'] + mock__get_fss_gid_from_name.return_value = FAKE_ID + self.FSS_MOCK.get_group_timemark.side_effect = [tm_info, final_tm_data] + encode_snap_name = self.proxy._encode_name(CG_SNAPSHOT['id']) + self.proxy.delete_cgsnapshot(CG_SNAPSHOT) + mock__get_fss_group_membercount.assert_called_once_with(FAKE_ID) + + self.assertEqual(2, self.FSS_MOCK.get_group_timemark.call_count) + self.FSS_MOCK.get_group_timemark.assert_any_call(FAKE_ID) + rawtimestamp = self.proxy._get_timestamp(tm_info, encode_snap_name) + timestamp = '%s_%s' % (FAKE_ID, rawtimestamp) + mock_delete_group_timemark.assert_called_once_with(timestamp) + self.FSS_MOCK.delete_group_timemark_policy.assert_called_once_with( + FAKE_ID) + + @mock.patch.object(proxy.RESTProxy, 'initialize_connection_iscsi') + def test_iscsi_initialize_connection(self, + mock_initialize_connection_iscsi): + fss_hosts = [] + fss_hosts.append(PRIMARY_IP) + self.proxy.initialize_connection_iscsi(VOLUME, ISCSI_CONNECTOR, + fss_hosts) + mock_initialize_connection_iscsi.assert_called_once_with( + VOLUME, + ISCSI_CONNECTOR, + fss_hosts) + + @mock.patch.object(proxy.RESTProxy, 'terminate_connection_iscsi') + def test_iscsi_terminate_connection(self, mock_terminate_connection_iscsi): + self.FSS_MOCK._get_target_info.return_value = (FAKE_ID, INITIATOR_IQN) + + 
self.proxy.terminate_connection_iscsi(VOLUME, ISCSI_CONNECTOR) + mock_terminate_connection_iscsi.assert_called_once_with( + VOLUME, + ISCSI_CONNECTOR) + + @mock.patch.object(proxy.RESTProxy, 'rename_vdev') + @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name') + def test_manage_existing(self, mock__get_fss_volume_name, + mock_rename_vdev): + new_vol_name = 'rename-vol' + mock__get_fss_volume_name.return_value = new_vol_name + + self.proxy._manage_existing_volume(FAKE_ID, VOLUME) + mock__get_fss_volume_name.assert_called_once_with(VOLUME) + mock_rename_vdev.assert_called_once_with(FAKE_ID, new_vol_name) + + @mock.patch.object(proxy.RESTProxy, 'list_volume_info') + def test_manage_existing_get_size(self, mock_list_volume_info): + volume_ref = {'source-id': FAKE_ID} + vdev_info = { + "rc": 0, + "data": { + "name": "cinder-2ab1f70a-6c89-432c-84e3-5fa6c187fb92", + "type": "san", + "category": "virtual", + "sizemb": 1020 + }} + + mock_list_volume_info.return_value = vdev_info + self.proxy._get_existing_volume_ref_vid(volume_ref) + mock_list_volume_info.assert_called_once_with(FAKE_ID) + + @mock.patch.object(proxy.RESTProxy, 'rename_vdev') + @mock.patch.object(proxy.RESTProxy, '_get_fss_vid_from_name') + @mock.patch.object(proxy.RESTProxy, '_get_fss_volume_name') + def test_unmanage(self, mock__get_fss_volume_name, + mock__get_fss_vid_from_name, + mock_rename_vdev): + + mock__get_fss_volume_name.return_value = VOLUME_NAME + mock__get_fss_vid_from_name.return_value = FAKE_ID + unmanaged_vol_name = VOLUME_NAME + "-unmanaged" + + self.proxy.unmanage(VOLUME) + mock__get_fss_volume_name.assert_called_once_with(VOLUME) + mock__get_fss_vid_from_name.assert_called_once_with(VOLUME_NAME, + FSS_SINGLE_TYPE) + mock_rename_vdev.assert_called_once_with(FAKE_ID, unmanaged_vol_name) diff --git a/cinder/volume/drivers/falconstor/__init__.py b/cinder/volume/drivers/falconstor/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/cinder/volume/drivers/falconstor/fc.py b/cinder/volume/drivers/falconstor/fc.py new file mode 100644 index 00000000000..1bc6a8e7d5d --- /dev/null +++ b/cinder/volume/drivers/falconstor/fc.py @@ -0,0 +1,110 @@ +# Copyright (c) 2016 FalconStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Fibre channel Cinder volume driver for FalconStor FSS storage system. + +This driver requires FSS-8.00-8865 or later. +""" + +from oslo_log import log as logging + +from cinder import exception +from cinder.i18n import _, _LE +from cinder import interface +import cinder.volume.driver +from cinder.volume.drivers.falconstor import fss_common +from cinder.zonemanager import utils as fczm_utils + +LOG = logging.getLogger(__name__) + + +@interface.volumedriver +class FSSFCDriver(fss_common.FalconstorBaseDriver, + cinder.volume.driver.FibreChannelDriver): + """Implements commands for FalconStor FSS FC management. 
+ + To enable the driver add the following line to the cinder configuration: + volume_driver=cinder.volume.drivers.falconstor.fc.FSSFCDriver + + Version history: + 1.0.0 - Initial driver + + """ + + VERSION = '1.0.0' + + def __init__(self, *args, **kwargs): + super(FSSFCDriver, self).__init__(*args, **kwargs) + self.gateway_fc_wwns = [] + self._storage_protocol = "FC" + self._backend_name = ( + self.configuration.safe_get('volume_backend_name') or + self.__class__.__name__) + self._lookup_service = fczm_utils.create_lookup_service() + + def do_setup(self, context): + """Any initialization the driver does while starting.""" + super(FSSFCDriver, self).do_setup(context) + self.gateway_fc_wwns = self.proxy.list_fc_target_wwpn() + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + super(FSSFCDriver, self).check_for_setup_error() + if len(self.gateway_fc_wwns) == 0: + msg = _('No FC targets found') + raise exception.InvalidHost(reason=msg) + + def validate_connector(self, connector): + """Check connector for at least one enabled FC protocol.""" + if 'FC' == self._storage_protocol and 'wwpns' not in connector: + LOG.error(_LE('The connector does not contain the required ' + 'information.')) + raise exception.InvalidConnectorException(missing='wwpns') + + @fczm_utils.AddFCZone + def initialize_connection(self, volume, connector): + fss_hosts = [] + fss_hosts.append(self.configuration.san_ip) + target_info = self.proxy.fc_initialize_connection(volume, connector, + fss_hosts) + init_targ_map = self._build_initiator_target_map( + target_info['available_initiator']) + + fc_info = {'driver_volume_type': 'fibre_channel', + 'data': {'target_lun': int(target_info['lun']), + 'target_discovered': True, + 'target_wwn': self.gateway_fc_wwns, + 'initiator_target_map': init_targ_map, + 'volume_id': volume['id'], + } + } + return fc_info + + def _build_initiator_target_map(self, initiator_wwns): + """Build the target_wwns and the initiator target 
map.""" + init_targ_map = dict.fromkeys(initiator_wwns, self.gateway_fc_wwns) + return init_targ_map + + @fczm_utils.RemoveFCZone + def terminate_connection(self, volume, connector, **kwargs): + host_id = self.proxy.fc_terminate_connection(volume, connector) + fc_info = {"driver_volume_type": "fibre_channel", "data": {}} + if self.proxy._check_fc_host_devices_empty(host_id): + available_initiator, fc_initiators_info = ( + self.proxy._get_fc_client_initiators(connector)) + init_targ_map = self._build_initiator_target_map( + available_initiator) + fc_info["data"] = {"target_wwn": self.gateway_fc_wwns, + "initiator_target_map": init_targ_map} + return fc_info diff --git a/cinder/volume/drivers/falconstor/fss_common.py b/cinder/volume/drivers/falconstor/fss_common.py new file mode 100644 index 00000000000..9d8c9739a59 --- /dev/null +++ b/cinder/volume/drivers/falconstor/fss_common.py @@ -0,0 +1,399 @@ +# Copyright (c) 2016 FalconStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Volume driver for FalconStor FSS storage system. + +This driver requires FSS-8.00-8865 or later. 
+""" + +import math +import re + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import units +import six + +from cinder import exception +from cinder.i18n import _, _LE, _LI, _LW +from cinder.image import image_utils +from cinder.volume.drivers.falconstor import rest_proxy +from cinder.volume.drivers.san import san + +LOG = logging.getLogger(__name__) + +FSS_OPTS = [ + cfg.IntOpt('fss_pool', + default='', + help='FSS pool id in which FalconStor volumes are stored.'), + cfg.BoolOpt('fss_debug', + default=False, + help="Enable HTTP debugging to FSS"), + cfg.StrOpt('additional_retry_list', + default='', + help='FSS additional retry list, separate by ;') +] + +CONF = cfg.CONF +CONF.register_opts(FSS_OPTS) + + +class FalconstorBaseDriver(san.SanDriver): + + def __init__(self, *args, **kwargs): + super(FalconstorBaseDriver, self).__init__(*args, **kwargs) + if self.configuration: + self.configuration.append_config_values(FSS_OPTS) + + self.proxy = rest_proxy.RESTProxy(self.configuration) + self._backend_name = ( + self.configuration.safe_get('volume_backend_name') or 'FalconStor') + self._storage_protocol = 'iSCSI' + + def do_setup(self, context): + self.proxy.do_setup() + LOG.info(_LI('Activate FalconStor cinder volume driver.')) + + def check_for_setup_error(self): + if self.proxy.session_id is None: + msg = (_('FSS cinder volume driver not ready: Unable to determine ' + 'session id.')) + raise exception.VolumeBackendAPIException(data=msg) + + if not self.configuration.fss_pool: + msg = _('Pool is not available in the cinder configuration ' + 'fields.') + raise exception.InvalidHost(reason=msg) + + self._pool_checking(self.configuration.fss_pool) + + def _pool_checking(self, pool_id): + pool_count = 0 + try: + output = self.proxy.list_pool_info(pool_id) + if "name" in output['data']: + pool_count = len(re.findall(rest_proxy.GROUP_PREFIX, + output['data']['name'])) + if pool_count is 0: + msg = (_('The 
given pool info must include the storage pool ' + 'and naming start with OpenStack-')) + raise exception.VolumeBackendAPIException(data=msg) + except Exception: + msg = (_('Unexpected exception during pool checking.')) + LOG.exception(msg) + raise exception.VolumeBackendAPIException(data=msg) + + def _check_multipath(self): + if self.configuration.use_multipath_for_image_xfer: + if not self.configuration.san_secondary_ip: + msg = (_('The san_secondary_ip param is null.')) + raise exception.VolumeBackendAPIException(data=msg) + output = self.proxy._check_iocluster_state() + if not output: + msg = (_('FSS do not support multipathing.')) + raise exception.VolumeBackendAPIException(data=msg) + return output + else: + return False + + def create_volume(self, volume): + """Creates a volume. + + We use the metadata of the volume to create variety volume. + + Create a thin provisioned volume : + [Usage] create --volume-type FSS --metadata thinprovisioned=true + thinsize= + + Create a LUN that is a Timeview of another LUN at a specified CDP tag: + [Usage] create --volume-type FSS --metadata timeview= + cdptag= volume-size + + Create a LUN that is a Timeview of another LUN at a specified Timemark: + [Usage] create --volume-type FSS --metadata timeview= + rawtimestamp= volume-size + + """ + + volume_metadata = self._get_volume_metadata(volume) + if not volume_metadata: + volume_name, fss_metadata = self.proxy.create_vdev(volume) + else: + if ("timeview" in volume_metadata and + ("cdptag" in volume_metadata) or + ("rawtimestamp" in volume_metadata)): + volume_name, fss_metadata = self.proxy.create_tv_from_cdp_tag( + volume_metadata, volume) + elif ("thinprovisioned" in volume_metadata and + "thinsize" in volume_metadata): + volume_name, fss_metadata = self.proxy.create_thin_vdev( + volume_metadata, volume) + else: + volume_name, fss_metadata = self.proxy.create_vdev(volume) + fss_metadata.update(volume_metadata) + + if type(volume['metadata']) is dict: + 
fss_metadata.update(volume['metadata']) + if volume['consistencygroup_id']: + self.proxy._add_volume_to_consistency_group( + volume['consistencygroup_id'], + volume_name + ) + return {'metadata': fss_metadata} + + def _get_volume_metadata(self, volume): + volume_metadata = {} + if 'volume_metadata' in volume: + for metadata in volume['volume_metadata']: + volume_metadata[metadata['key']] = metadata['value'] + return volume_metadata + + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + new_vol_name = self.proxy._get_fss_volume_name(volume) + src_name = self.proxy._get_fss_volume_name(src_vref) + vol_size = volume["size"] + src_size = src_vref["size"] + fss_metadata = self.proxy.clone_volume(new_vol_name, src_name) + self.proxy.extend_vdev(new_vol_name, src_size, vol_size) + + if volume['consistencygroup_id']: + self.proxy._add_volume_to_consistency_group( + volume['consistencygroup_id'], + new_vol_name + ) + volume_metadata = self._get_volume_metadata(volume) + fss_metadata.update(volume_metadata) + + if type(volume['metadata']) is dict: + fss_metadata.update(volume['metadata']) + return {'metadata': fss_metadata} + + def extend_volume(self, volume, new_size): + """Extend volume to new_size.""" + volume_name = self.proxy._get_fss_volume_name(volume) + self.proxy.extend_vdev(volume_name, volume["size"], new_size) + + def delete_volume(self, volume): + """Disconnect all hosts and delete the volume""" + try: + self.proxy.delete_vdev(volume) + except rest_proxy.FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + ctxt.reraise = False + LOG.warning(_LW("Volume deletion failed with message: %s"), + err.reason) + + def create_snapshot(self, snapshot): + """Creates a snapshot.""" + snap_metadata = snapshot["metadata"] + metadata = self.proxy.create_snapshot(snapshot) + snap_metadata.update(metadata) + return {'metadata': snap_metadata} + + def delete_snapshot(self, snapshot): + """Deletes a 
snapshot.""" + try: + self.proxy.delete_snapshot(snapshot) + except rest_proxy.FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + ctxt.reraise = False + LOG.error( + _LE("Snapshot deletion failed with message: %s"), + err.reason) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + vol_size = volume['size'] + snap_size = snapshot['volume_size'] + volume_name, fss_metadata = self.proxy.create_volume_from_snapshot( + volume, snapshot) + + if vol_size != snap_size: + try: + extend_volume_name = self.proxy._get_fss_volume_name(volume) + self.proxy.extend_vdev(extend_volume_name, snap_size, vol_size) + except rest_proxy.FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + ctxt.reraise = False + LOG.error(_LE( + "Resizing %(id)s failed with message: %(msg)s. " + "Cleaning volume."), {'id': volume["id"], + 'msg': err.reason}) + + if type(volume['metadata']) is dict: + fss_metadata.update(volume['metadata']) + + if volume['consistencygroup_id']: + self.proxy._add_volume_to_consistency_group( + volume['consistencygroup_id'], + volume_name) + return {'metadata': fss_metadata} + + def ensure_export(self, context, volume): + pass + + def create_export(self, context, volume, connector): + pass + + def remove_export(self, context, volume): + pass + + # Attach/detach volume to instance/host + def attach_volume(self, context, volume, instance_uuid, host_name, + mountpoint): + pass + + def detach_volume(self, context, volume, attachment=None): + pass + + def get_volume_stats(self, refresh=False): + total_capacity = 0 + free_space = 0 + if refresh: + try: + info = self.proxy._get_pools_info() + if info: + total_capacity = int(info['total_capacity_gb']) + used_space = int(info['used_gb']) + free_space = int(total_capacity - used_space) + + data = {"vendor_name": "FalconStor", + "volume_backend_name": self._backend_name, + "driver_version": self.VERSION, + "storage_protocol": 
self._storage_protocol, + "total_capacity_gb": total_capacity, + "free_capacity_gb": free_space, + "reserved_percentage": 0, + "consistencygroup_support": True + } + + self._stats = data + + except Exception as exc: + LOG.error(_LE('Cannot get volume status %(exc)s.'), + {'exc': exc}) + return self._stats + + def create_consistencygroup(self, context, group): + """Creates a consistencygroup.""" + self.proxy.create_group(group) + model_update = {'status': 'available'} + return model_update + + def delete_consistencygroup(self, context, group, volumes): + """Deletes a consistency group.""" + self.proxy.destroy_group(group) + volume_updates = [] + for volume in volumes: + self.delete_volume(volume) + volume_updates.append({ + 'id': volume.id, + 'status': 'deleted' + }) + + model_update = {'status': group['status']} + return model_update, volume_updates + + def update_consistencygroup(self, context, group, + add_volumes=None, remove_volumes=None): + addvollist = [] + remvollist = [] + if add_volumes: + for volume in add_volumes: + addvollist.append(self.proxy._get_fss_volume_name(volume)) + if remove_volumes: + for volume in remove_volumes: + remvollist.append(self.proxy._get_fss_volume_name(volume)) + + self.proxy.set_group(group['id'], addvollist=addvollist, + remvollist=remvollist) + return None, None, None + + def create_cgsnapshot(self, context, cgsnapshot, snapshots): + """Creates a cgsnapshot.""" + cgsnapshot_id = cgsnapshot['id'] + try: + self.proxy.create_cgsnapshot(cgsnapshot) + except Exception as e: + msg = _('Failed to create cg snapshot %(id)s ' + 'due to %(reason)s.') % {'id': cgsnapshot_id, + 'reason': six.text_type(e)} + raise exception.VolumeBackendAPIException(data=msg) + + snapshot_updates = [] + for snapshot in snapshots: + snapshot_updates.append({ + 'id': snapshot.id, + 'status': 'available' + }) + model_update = {'status': 'available'} + return model_update, snapshot_updates + + def delete_cgsnapshot(self, context, cgsnapshot, snapshots): + 
"""Deletes a cgsnapshot.""" + cgsnapshot_id = cgsnapshot.id + try: + self.proxy.delete_cgsnapshot(cgsnapshot) + except Exception as e: + msg = _('Failed to delete cgsnapshot %(id)s ' + 'due to %(reason)s.') % {'id': cgsnapshot_id, + 'reason': six.text_type(e)} + raise exception.VolumeBackendAPIException(data=msg) + + snapshot_updates = [] + for snapshot in snapshots: + snapshot_updates.append({ + 'id': snapshot.id, + 'status': 'deleted', + }) + model_update = {'status': cgsnapshot.status} + return model_update, snapshot_updates + + def manage_existing(self, volume, existing_ref): + """Convert an existing FSS volume to a Cinder volume. + + We expect a volume id in the existing_ref that matches one in FSS. + """ + volume_metadata = {} + self.proxy._get_existing_volume_ref_vid(existing_ref) + self.proxy._manage_existing_volume(existing_ref['source-id'], volume) + volume_metadata['FSS-vid'] = existing_ref['source-id'] + updates = {'metadata': volume_metadata} + return updates + + def manage_existing_get_size(self, volume, existing_ref): + """Get size of an existing FSS volume. + + We expect a volume id in the existing_ref that matches one in FSS. 
+ """ + sizemb = self.proxy._get_existing_volume_ref_vid(existing_ref) + size = int(math.ceil(float(sizemb) / units.Ki)) + return size + + def unmanage(self, volume): + """Remove Cinder management from FSS volume""" + self.proxy.unmanage(volume) + + def copy_image_to_volume(self, context, volume, image_service, image_id): + with image_utils.temporary_file() as tmp: + image_utils.fetch_verify_image(context, image_service, + image_id, tmp) + image_utils.fetch_to_raw(context, + image_service, + image_id, + tmp, + self.configuration.volume_dd_blocksize, + size=volume['size']) diff --git a/cinder/volume/drivers/falconstor/iscsi.py b/cinder/volume/drivers/falconstor/iscsi.py new file mode 100644 index 00000000000..43612122204 --- /dev/null +++ b/cinder/volume/drivers/falconstor/iscsi.py @@ -0,0 +1,102 @@ +# Copyright (c) 2016 FalconStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Volume driver for FalconStor FSS storage system. + +This driver requires FSS-8.00-8865 or later. +""" + +from cinder import interface +import cinder.volume.driver +from cinder.volume.drivers.falconstor import fss_common + +DEFAULT_ISCSI_PORT = 3260 + + +@interface.volumedriver +class FSSISCSIDriver(fss_common.FalconstorBaseDriver, + cinder.volume.driver.ISCSIDriver): + + """Implements commands for FalconStor FSS ISCSI management. 
+ + To enable the driver add the following line to the cinder configuration: + volume_driver=cinder.volume.drivers.falconstor.iscsi.FSSISCSIDriver + + Version history: + 1.0.0 - Initial driver + 1.0.1 - Fix copy_image_to_volume error. + 1.0.2 - Closes-Bug #1554184, add lun id type conversion in + initialize_connection + 1.03 - merge source code + 1.04 - Fixed create_volume_from_snapshot(), create_cloned_volume() + metadata TypeError + 2.0.0 - Mitaka driver + -- fixed consisgroup commands error. + 2.0.1 -- fixed bugs + 2.0.2 -- support Multipath + 3.0.0 - Newton driver + + """ + + VERSION = '3.0.0' + + def __init__(self, *args, **kwargs): + super(FSSISCSIDriver, self).__init__(*args, **kwargs) + self._storage_protocol = "iSCSI" + self._backend_name = ( + self.configuration.safe_get('volume_backend_name') or + self.__class__.__name__) + + def initialize_connection(self, volume, connector, initiator_data=None): + fss_hosts = [] + target_portal = [] + multipath = connector.get('multipath', False) + fss_hosts.append(self.configuration.san_ip) + + if multipath: + if self._check_multipath(): + fss_hosts.append(self.configuration.san_secondary_ip) + else: + multipath = False + + for host in fss_hosts: + iscsi_ip_port = "%s:%d" % (host, DEFAULT_ISCSI_PORT) + target_portal.append(iscsi_ip_port) + + target_info = self.proxy.initialize_connection_iscsi(volume, + connector, + fss_hosts) + properties = {} + properties['target_discovered'] = True + properties['discard'] = True + properties['encrypted'] = False + properties['qos_specs'] = None + properties['access_mode'] = 'rw' + properties['volume_id'] = volume['id'] + properties['target_iqn'] = target_info['iqn'] + properties['target_portal'] = target_portal[0] + properties['target_lun'] = int(target_info['lun']) + + if multipath: + properties['target_iqns'] = [target_info['iqn'], + target_info['iqn']] + properties['target_portals'] = target_portal + properties['target_luns'] = [int(target_info['lun']), + 
int(target_info['lun'])] + + return {'driver_volume_type': 'iscsi', 'data': properties} + + def terminate_connection(self, volume, connector, **kwargs): + """Terminate connection.""" + self.proxy.terminate_connection_iscsi(volume, connector) diff --git a/cinder/volume/drivers/falconstor/rest_proxy.py b/cinder/volume/drivers/falconstor/rest_proxy.py new file mode 100644 index 00000000000..1604963b5c6 --- /dev/null +++ b/cinder/volume/drivers/falconstor/rest_proxy.py @@ -0,0 +1,1530 @@ +# Copyright (c) 2016 FalconStor, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import base64 +import json +import random +import time +import uuid + +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import units +from six.moves import http_client + +from cinder import exception +from cinder.i18n import _, _LI, _LW + + +FSS_BATCH = 'batch' +FSS_PHYSICALRESOURCE = 'physicalresource' +FSS_PHYSICALADAPTER = 'physicaladapter' +FSS_FCCLIENTINITIATORS = 'fcclientinitiators' +FSS_FC_TGT_WWPN = 'fctgtwwpn' +FSS_STORAGE_POOL = 'storagepool' +FSS_LOGICALRESOURCE = 'logicalresource' +FSS_SAN = 'sanresource' +FSS_MIRROR = 'mirror' +FSS_TIMEMARKPOLICY = 'timemarkpolicy' +FSS_TIMEMARK = 'timemark' +FSS_TIMEVIEW = 'timeview' +FSS_SNAPSHOT_RESOURCE = 'snapshotresource' +FSS_SNAPSHOT_GROUP = 'snapshotgroup' +FSS_CLIENT = 'client' +FSS_SANCLIENT = 'sanclient' +FSS_ISCSI_TARGET = 'iscsitarget' +FSS_ISCSI_CLIENT_INITIATORS = 'iscsiclientinitiators' +FSS_SERVER = 'server' +FSS_OPTIONS = 'options' +FSS_PORTAL = 'defaultiscsiportal' +FSS_PROPERTIES = 'properties' +FSS_HOST = 'host' +FSS_RETURN_CODE = 'rcs' +FSS_AUTH = 'auth' +FSS_LOGIN = 'login' +FSS_SINGLE_TYPE = 'single' + + +POST = 'POST' +GET = 'GET' +PUT = 'PUT' +DELETE = 'DELETE' +GROUP_PREFIX = 'OpenStack-' +PRODUCT_NAME = 'ipstor' +SESSION_COOKIE_NAME = 'session_id' +RETRY_LIST = ['107', '2147680512'] + +MAXSNAPSHOTS = 1000 +OPERATION_TIMEOUT = 60 * 60 +RETRY_CNT = 5 +RETRY_INTERVAL = 15 + +LOG = logging.getLogger(__name__) + + +class RESTProxy(object): + def __init__(self, config): + self.fss_host = config.san_ip + self.fss_username = config.san_login + self.fss_password = config.san_password + self.fss_defined_pool = config.fss_pool + if config.additional_retry_list: + RETRY_LIST.append(config.additional_retry_list) + + self.FSS = FSSRestCommon( + host=self.fss_host, + username=self.fss_username, + password=self.fss_password, + fss_debug=config.fss_debug) + self.session_id = None + + # naming + def _get_vol_name_from_snap(self, snapshot): + """Return the name of the 
snapshot that FSS will use.""" + return "cinder-%s" % snapshot["volume_id"] + + def _get_fss_volume_name(self, volume): + """Return the name of the volume FSS will use.""" + return "cinder-%s" % volume["id"] + + def _get_group_name_from_id(self, id): + return "cinder-consisgroup-%s" % id + + def _encode_name(self, name): + uuid_str = name.replace("-", "") + vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) + newuuid = (base64.urlsafe_b64encode(vol_uuid.bytes). + decode('utf-8').strip('=')) + return "cinder-%s" % newuuid + + def do_setup(self): + self.session_id = self.FSS.fss_login() + + def _convert_size_to_gb(self, size): + s = round(float(size) / units.Gi, 2) + if s > 0: + return s + else: + return 0 + + def _convert_size_to_mb(self, size): + return size * units.Ki + + def _get_pools_info(self): + qpools = [] + poolinfo = {} + try: + output = self.list_pool_info() + if "storagepools" in output['data']: + for item in output['data']['storagepools']: + if item['name'].startswith(GROUP_PREFIX) and ( + self.fss_defined_pool == item['id']): + poolid = int(item['id']) + qpools.append(poolid) + break + + if not qpools: + msg = _('The storage pool information is empty or not correct') + raise exception.DriverNotInitialized(msg) + + # Query pool detail information + for poolid in qpools: + output = self.list_pool_info(poolid) + poolinfo['pool_name'] = output['data']['name'] + poolinfo['total_capacity_gb'] = ( + self._convert_size_to_gb(output['data']['size'])) + poolinfo['used_gb'] = ( + self._convert_size_to_gb(output['data']['used'])) + poolinfo['QoS_support'] = False + poolinfo['reserved_percentage'] = 0 + except Exception: + msg = (_('Unexpected exception during get pools info.')) + LOG.exception(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return poolinfo + + def list_pool_info(self, pool_id=None): + return self.FSS.list_pool_info(pool_id) + + def list_physicaladapter_info(self, adapter_id=None): + return self.FSS.list_physicaladapter_info(adapter_id) 
+ + def _checking_adapter_type(self, id): + adapter_type = '' + output = self.list_physicaladapter_info() + if "physicaladapters" in output['data']: + physicaladapters = output['data']['physicaladapters'] + if physicaladapters['id'] == id: + adapter_type = physicaladapters['type'] + return adapter_type + + def create_vdev(self, volume): + sizemb = self._convert_size_to_mb(volume["size"]) + volume_name = self._get_fss_volume_name(volume) + params = dict(storagepoolid=self.fss_defined_pool, + category="virtual", + sizemb=sizemb, + name=volume_name) + return volume_name, self.FSS.create_vdev(params) + + def create_tv_from_cdp_tag(self, volume_metadata, volume): + tv_vid = '' + cdp_tag = '' + + if 'cdptag' in volume_metadata: + tv_vid = str(volume_metadata['timeview']) + '_0' + cdp_tag = str(volume_metadata['cdptag']) + + if 'rawtimestamp' in volume_metadata: + tv_vid = '{0}_{1}'.format(str(volume_metadata['timeview']), + str(volume_metadata['rawtimestamp'])) + volume_name = self._get_fss_volume_name(volume) + sizemb = self._convert_size_to_mb(volume['size']) + params = dict(name=volume_name, + storage=dict(storagepoolid=self.fss_defined_pool, + sizemb=sizemb), + automaticexpansion=dict(enabled=False), + timeviewcopy=True) + if cdp_tag: + params.update(cdpjournaltag=cdp_tag) + + metadata = self.FSS.create_timeview(tv_vid, params) + return volume_name, metadata + + def create_thin_vdev(self, volume_metadata, volume): + thin_size = 0 + size = volume["size"] + sizemb = self._convert_size_to_mb(size) + params = dict(storagepoolid=self.fss_defined_pool, + category="virtual") + + if 'thinprovisioned' in volume_metadata: + if volume_metadata['thinprovisioned'] is False: + msg = (_('If you want to create a thin provisioning volume,' + ' this param must be True.')) + raise exception.VolumeBackendAPIException(msg) + + if 'thinsize' in volume_metadata: + thin_size = int(volume_metadata['thinsize']) + + if size < 10: + msg = _('The resource is a FSS thin device, minimum size is ' 
+ '10240 MB.') + raise exception.VolumeBackendAPIException(msg) + else: + try: + if thin_size > size: + msg = _('The allocated size must less than total size.') + raise exception.VolumeBackendAPIException(msg) + except Exception: + msg = _('The resource is a thin device, thin size is invalid.') + raise exception.VolumeBackendAPIException(msg) + + thin_size = self._convert_size_to_mb(thin_size) + thin_disk = dict( + enabled=True, + fullsizemb=sizemb) + params.update(thinprovisioning=thin_disk) + params.update(sizemb=thin_size) + + volume_name = self._get_fss_volume_name(volume) + params.update(name=volume_name) + return volume_name, self.FSS.create_vdev(params) + + def _get_fss_vid_from_name(self, volume_name, fss_type=None): + vid = [] + output = self.FSS.list_fss_volume_info() + try: + if "virtualdevices" in output['data']: + for item in output['data']['virtualdevices']: + if item['name'] in volume_name: + vid.append(item['id']) + except Exception: + msg = (_('Can not find cinder volume - %(volumeName)s') % + {"volumeName": volume_name}) + raise exception.VolumeBackendAPIException(msg) + + if fss_type is not None and fss_type == FSS_SINGLE_TYPE: + vid = ''.join(str(x) for x in vid) + return vid + + def _get_fss_gid_from_name(self, group_name): + gid = '' + output = self.FSS.list_group_info() + if "snapshotgroups" in output['data']: + for item in output['data']['snapshotgroups']: + if item['name'] == group_name: + gid = item['id'] + break + if gid == '': + msg = (_('Can not find consistency group: %s.') % group_name) + raise exception.VolumeBackendAPIException(msg) + return gid + + def _get_fss_group_membercount(self, gid): + membercount = 0 + output = self.FSS.list_group_info(gid) + if "membercount" in output['data']: + membercount = output['data']['membercount'] + return membercount + + def _get_vdev_id_from_group_id(self, group_id): + vidlist = [] + output = self.FSS.list_group_info(group_id) + if "virtualdevices" in output['data']: + for item in 
output['data']['virtualdevices']: + vidlist.append(item['id']) + return vidlist + + def clone_volume(self, new_vol_name, source_volume_name): + params = dict(storagepoolid=self.fss_defined_pool) + volume_metadata = {} + new_vid = '' + vid = self._get_fss_vid_from_name(source_volume_name, FSS_SINGLE_TYPE) + mirror_params = dict( + category='virtual', + selectioncriteria='anydrive', + mirrortarget="virtual" + ) + mirror_params.update(params) + ret1 = self.FSS.create_mirror(vid, mirror_params) + + if ret1: + if ret1['rc'] != 0: + failed_ret = self.FSS.get_fss_error_code(ret1['rc']) + raise exception.VolumeBackendAPIException(data=failed_ret) + + ret2 = self.FSS.sync_mirror(vid) + self.FSS._random_sleep() + if ret2['rc'] == 0: + self.FSS._check_mirror_sync_finished(vid, OPERATION_TIMEOUT) + ret3 = self.FSS.promote_mirror(vid, new_vol_name) + if ret3 and ret3['rc'] == 0: + new_vid = ret3['id'] + + volume_metadata['FSS-vid'] = new_vid + return volume_metadata + + def delete_vdev(self, volume): + volume_name = self._get_fss_volume_name(volume) + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + if vid: + return self.FSS.delete_vdev(vid) + else: + msg = _('vid is null. FSS failed to delete volume.') + raise exception.VolumeBackendAPIException(data=msg) + + def create_snapshot(self, snapshot): + snap_metadata = {} + volume_name = self._get_vol_name_from_snap(snapshot) + snap_name = snapshot["display_name"] + size = snapshot['volume_size'] + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + if not vid: + msg = _('vid is null. FSS failed to create snapshot.') + raise exception.VolumeBackendAPIException(data=msg) + + (snap, tm_policy, vdev_size) = (self.FSS. 
+ _check_if_snapshot_tm_exist(vid)) + + if not snap: + self.create_vdev_snapshot(vid, self._convert_size_to_mb(size)) + if not tm_policy: + self.FSS.create_timemark_policy( + vid, storagepoolid=self.fss_defined_pool) + if not snap_name: + snap_name = "snap-%s" % time.strftime('%Y%m%d%H%M%S') + + self.FSS.create_timemark(vid, snap_name) + snap_metadata['fss_tm_comment'] = snap_name + return snap_metadata + + def delete_snapshot(self, snapshot): + volume_name = self._get_vol_name_from_snap(snapshot) + snap_name = snapshot["display_name"] + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + if not vid: + msg = _('vid is null. FSS failed to delete snapshot') + raise exception.VolumeBackendAPIException(data=msg) + if not snap_name: + if ('metadata' in snapshot and 'fss_tm_comment' in + snapshot['metadata']): + snap_name = snapshot['metadata']['fss_tm_comment'] + + tm_info = self.FSS.get_timemark(vid) + rawtimestamp = self._get_timestamp(tm_info, snap_name) + if rawtimestamp: + timestamp = '%s_%s' % (vid, rawtimestamp) + self.FSS.delete_timemark(timestamp) + + final_tm_data = self.FSS.get_timemark(vid) + if "timemark" in final_tm_data['data']: + if not final_tm_data['data']['timemark']: + self.FSS.delete_timemark_policy(vid) + self.FSS.delete_vdev_snapshot(vid) + + def _get_timestamp(self, tm_data, encode_snap_name): + timestamp = '' + if "timemark" in tm_data['data']: + for item in tm_data['data']['timemark']: + if "comment" in item and item['comment'] == encode_snap_name: + timestamp = item['rawtimestamp'] + break + return timestamp + + def create_volume_from_snapshot(self, volume, snapshot): + volume_metadata = {} + volume_name = self._get_vol_name_from_snap(snapshot) + snap_name = snapshot["display_name"] + new_vol_name = self._get_fss_volume_name(volume) + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + if not vid: + msg = _('vid is null. 
FSS failed to create_volume_from_snapshot.') + raise exception.VolumeBackendAPIException(data=msg) + + if not snap_name: + if ('metadata' in snapshot) and ('fss_tm_comment' + in snapshot['metadata']): + snap_name = snapshot['metadata']['fss_tm_comment'] + + tm_info = self.FSS.get_timemark(vid) + rawtimestamp = self._get_timestamp(tm_info, snap_name) + if not rawtimestamp: + msg = _('rawtimestamp is null. FSS failed to ' + 'create_volume_from_snapshot.') + raise exception.VolumeBackendAPIException(data=msg) + + timestamp = '%s_%s' % (vid, rawtimestamp) + output = self.FSS.copy_timemark( + timestamp, storagepoolid=self.fss_defined_pool, name=new_vol_name) + if output['rc'] == 0: + vid = output['id'] + self.FSS._random_sleep() + if self.FSS._check_tm_copy_finished(vid, OPERATION_TIMEOUT): + volume_metadata['FSS-vid'] = vid + return volume_name, volume_metadata + + def extend_vdev(self, volume_name, vol_size, new_size): + if new_size > vol_size: + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + size = self._convert_size_to_mb(new_size - vol_size) + params = dict( + action='expand', + sizemb=size + ) + return self.FSS.extend_vdev(vid, params) + + def list_volume_info(self, vid): + return self.FSS.list_fss_volume_info(vid) + + def rename_vdev(self, vid, new_vol_name): + params = dict( + action='update', + name=new_vol_name + ) + return self.FSS.rename_vdev(vid, params) + + def assign_iscsi_vdev(self, client_id, target_id, vid): + params = dict( + action="assign", + virtualdeviceids=[vid], + iscsi=dict(target=target_id) + ) + return self.FSS.assign_vdev(client_id, params) + + def assign_fc_vdev(self, client_id, vid): + params = dict( + action="assign", + virtualdeviceids=[vid], + fc=dict( + fcmapping='alltoall', + accessmode='readwritenonexclusive') + ) + return self.FSS.assign_vdev(client_id, params) + + def unassign_vdev(self, client_id, vid): + params = dict( + action="unassign", + virtualdeviceid=vid + ) + return self.FSS.unassign_vdev(client_id, 
params) + + def _create_vdev_snapshot(self, volume_name, size): + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + return self.create_vdev_snapshot(vid, self._convert_size_to_mb(size)) + + def create_vdev_snapshot(self, vid, size): + params = dict( + idlist=[vid], + selectioncriteria='anydrive', + policy='alwayswrite', + sizemb=size, + storagepoolid=self.fss_defined_pool + ) + return self.FSS.create_vdev_snapshot(params) + + def create_group(self, group): + group_name = self._get_group_name_from_id(group['id']) + params = dict( + name=group_name + ) + return self.FSS.create_group(params) + + def destroy_group(self, group): + group_name = self._get_group_name_from_id(group['id']) + gid = self._get_fss_gid_from_name(group_name) + return self.FSS.destroy_group(gid) + + def _add_volume_to_consistency_group(self, group_id, vol_name): + self.set_group(group_id, addvollist=[vol_name]) + + def set_group(self, group_id, **kwargs): + group_name = self._get_group_name_from_id(group_id) + gid = self._get_fss_gid_from_name(group_name) + + join_params = dict() + leave_params = dict() + if kwargs.get('addvollist'): + joing_vid = self._get_fss_vid_from_name(kwargs['addvollist']) + join_params.update( + action='join', + virtualdevices=joing_vid + ) + if kwargs.get('remvollist'): + leave_vid = self._get_fss_vid_from_name(kwargs['remvollist']) + leave_params.update( + action='leave', + virtualdevices=leave_vid + ) + return self.FSS.set_group(gid, join_params, leave_params) + + def create_cgsnapshot(self, cgsnapshot): + group_name = self._get_group_name_from_id( + cgsnapshot['consistencygroup_id']) + gsnap_name = self._encode_name(cgsnapshot['id']) + gid = self._get_fss_gid_from_name(group_name) + vidlist = self._get_vdev_id_from_group_id(gid) + + for vid in vidlist: + (snap, tm_policy, sizemb) = (self.FSS. 
+ _check_if_snapshot_tm_exist(vid)) + if not snap: + self.create_vdev_snapshot(vid, sizemb) + if not tm_policy: + self.FSS.create_timemark_policy( + vid, storagepoolid=self.fss_defined_pool) + + group_tm_policy = self.FSS._check_if_group_tm_enabled(gid) + if not group_tm_policy: + self.create_group_timemark_policy(gid) + + self.create_group_timemark(gid, gsnap_name) + + def create_group_timemark_policy(self, gid): + tm_params = dict( + automatic=dict(enabled=False), + maxtimemarkcount=MAXSNAPSHOTS + ) + return self.FSS.create_group_timemark_policy(gid, tm_params) + + def create_group_timemark(self, gid, gsnap_name): + params = dict( + comment=gsnap_name, + priority='medium', + snapshotnotification=False + ) + return self.FSS.create_group_timemark(gid, params) + + def delete_cgsnapshot(self, cgsnapshot): + group_name = self._get_group_name_from_id( + cgsnapshot['consistencygroup_id']) + encode_snap_name = self._encode_name(cgsnapshot['id']) + gid = self._get_fss_gid_from_name(group_name) + + if not gid: + msg = _('gid is null. 
FSS failed to delete cgsnapshot.') + raise exception.VolumeBackendAPIException(data=msg) + + if self._get_fss_group_membercount(gid) != 0: + tm_info = self.FSS.get_group_timemark(gid) + rawtimestamp = self._get_timestamp(tm_info, encode_snap_name) + timestamp = '%s_%s' % (gid, rawtimestamp) + self.delete_group_timemark(timestamp) + + final_tm_data = self.FSS.get_group_timemark(gid) + if "timemark" in final_tm_data['data']: + if not final_tm_data['data']['timemark']: + self.FSS.delete_group_timemark_policy(gid) + + def delete_group_timemark(self, timestamp): + params = dict( + deleteallbefore=False + ) + return self.FSS.delete_group_timemark(timestamp, params) + + def _check_iscsi_option(self): + output = self.FSS.get_server_options() + if "iscsitarget" in output['data']: + if not output['data']['iscsitarget']: + self.FSS.set_server_options('iscsitarget') + + def _check_fc_target_option(self): + output = self.FSS.get_server_options() + if "fctarget" in output['data']: + if not output['data']['fctarget']: + self.FSS.set_server_options('fctarget') + + def _check_iocluster_state(self): + output = self.FSS.get_server_options() + if 'iocluster' not in output['data']: + msg = _('No iocluster information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + return output['data']['iocluster'] + + def list_fc_target_wwpn(self): + return self.FSS.list_fc_target_wwpn() + + def list_fc_client_initiators(self): + return self.FSS.list_fc_client_initiators() + + def create_fc_client(self, cinder_host_name, free_initiator_wwpns): + client_id = 0 + params = dict( + name=cinder_host_name, + protocoltype=["fc"], + ipaddress=self.fss_host, + ostype='linux', + fcpolicy=dict( + initiators=[free_initiator_wwpns], + vsaenabled=False + ) + ) + client_info = self.FSS.create_client(params) + if client_info and client_info['rc'] == 0: + client_id = client_info['id'] + return client_id + + def list_iscsi_target_info(self, target_id=None): + return 
self.FSS.list_iscsi_target_info(target_id) + + def _check_fc_host_devices_empty(self, client_id): + is_empty = False + output = self.FSS.list_sanclient_info(client_id) + if 'data' not in output: + msg = _('No target in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if 'fcdevices' not in output['data']: + msg = _('No fcdevices in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if len(output['data']['fcdevices']) == 0: + is_empty = True + self.FSS.delete_client(client_id) + return is_empty + + def create_iscsi_client(self, cinder_host_name, initiator): + params = dict( + name=cinder_host_name, + protocoltype=["iscsi"], + ipaddress=self.fss_host, + ostype='linux', + iscsipolicy=dict( + initiators=[initiator], + authentication=dict(enabled=False, + mutualchap=dict(enabled=False)) + ) + ) + return self.FSS.create_client(params) + + def create_iscsitarget(self, client_id, initiator, fss_hosts): + params = dict( + clientid=client_id, + name=initiator, + ipaddress=fss_hosts, + accessmode='readwritenonexclusive' + ) + return self.FSS.create_iscsitarget(params) + + def _get_iscsi_host(self, connector): + target_info = self.list_iscsi_target_info() + if 'data' not in target_info: + msg = _('No data information in return info.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'iscsitargets' not in target_info['data']: + msg = _('No iscsitargets in return info.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if target_info['data']['iscsitargets']: + iscsitargets = target_info['data']['iscsitargets'] + for iscsitarget in iscsitargets: + if connector["initiator"] in iscsitarget["name"]: + target_id = iscsitarget["id"] + client_id = iscsitarget["clientid"] + return client_id, target_id + return None, None + + def _create_iscsi_host(self, host_name, initiator, fss_hosts): + client_id = '' + target_id = '' + client_info = 
self.create_iscsi_client(host_name, initiator) + if client_info and client_info['rc'] == 0: + client_id = client_info['id'] + + target_info = self.create_iscsitarget(client_id, initiator, fss_hosts) + if target_info['rc'] == 0: + target_id = target_info['id'] + return client_id, target_id + + def _get_fc_client_initiators(self, connector): + fc_initiators_assigned = [] + fc_available_initiator = [] + fc_initiators_info = self.list_fc_client_initiators() + if 'data' not in fc_initiators_info: + raise ValueError(_('No data information in return info.')) + + if fc_initiators_info['data']: + fc_initiators = fc_initiators_info['data'] + for fc_initiator in fc_initiators: + if fc_initiator['wwpn'] in connector['wwpns']: + fc_available_initiator.append(str(fc_initiator['wwpn'])) + fc_initiators_assigned.append(dict( + wwpn=str(fc_initiator['wwpn']), + assigned=fc_initiator['assigned'])) + return fc_available_initiator, fc_initiators_assigned + + def fc_initialize_connection(self, volume, connector, fss_hosts): + """Connect the host and volume; return dict describing connection.""" + vid = 0 + fc_target_info = {} + free_fc_initiator = None + + volume_name = self._get_fss_volume_name(volume) + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + if not vid: + msg = (_('Can not find cinder volume - %s.') % volume_name) + raise exception.VolumeBackendAPIException(msg) + + available_initiator, fc_initiators_info = ( + self._get_fc_client_initiators(connector)) + + if fc_initiators_info is None: + msg = _('No FC initiator can be added to host.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + for fc_initiator in fc_initiators_info: + value = fc_initiator['assigned'] + if len(value) == 0: + free_fc_initiator = fc_initiator['wwpn'] + + if free_fc_initiator is None: + msg = _('No free FC initiator can be assigned to host.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + initiator = connector["initiator"] + 
host_name = GROUP_PREFIX + '%s-' % connector["host"] + + initiator_name = initiator.split(':') + idx = len(initiator_name) - 1 + client_host_name = host_name + initiator_name[ + idx] + '_FC-wwpn-' + free_fc_initiator + + client_id = self.create_fc_client(client_host_name, free_fc_initiator) + + try: + self.assign_fc_vdev(client_id, vid) + time.sleep(3) + except FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + if (err.code == 2415984845 and "XML_ERROR_CLIENT_EXIST" + in err.text): + ctxt.reraise = False + LOG.warning(_LW('Assign volume failed with message: %(msg)s.'), + {"msg": err.reason}) + finally: + lun = self.FSS._get_fc_client_info(client_id, vid) + + fc_target_info['lun'] = lun + fc_target_info['available_initiator'] = available_initiator + + if not fc_target_info: + msg = _('Failed to get iSCSI target info for the LUN: %s.') + raise exception.VolumeBackendAPIException(data=msg % volume_name) + return fc_target_info + + def fc_terminate_connection(self, volume, connector): + client_id = 0 + volume_name = self._get_fss_volume_name(volume) + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + output = self.list_volume_info(vid) + if 'data' not in output: + msg = _('No vdev information in given data') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'clients' not in output['data']: + msg = _('No clients in vdev information.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + client_info = output['data']['clients'] + for fcclients in client_info: + client_id = int(fcclients['id']) + + if client_id == 0: + msg = _( + 'Can not find client id. 
The connection target name is %s.') + raise exception.VolumeBackendAPIException( + data=msg % connector["initiator"]) + try: + self.unassign_vdev(client_id, vid) + except FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + if (err.code == 2415984988 and + "XML_ERROR_VIRTUAL_DEV_NOT_ASSIGNED_TO_iSCSI_TARGET" + in err.text): + ctxt.reraise = False + LOG.warning(_LW('Disconnection failed with message: ' + "%(msg)s."), {"msg": err.reason}) + return client_id + + def initialize_connection_iscsi(self, volume, connector, fss_hosts): + """Connect the host and volume; return dict describing connection.""" + vid = 0 + iscsi_target_info = {} + self._check_iscsi_option() + client_id, target_id = self._get_iscsi_host(connector) + + if target_id is None: + initiator = connector["initiator"] + host_name = GROUP_PREFIX + '%s-' % connector["host"] + + initiator_info = initiator.split(':') + idx = len(initiator_info) - 1 + client_host_name = host_name + initiator_info[idx] + + client_id, target_id = self._create_iscsi_host(client_host_name, + initiator, + fss_hosts) + volume_name = self._get_fss_volume_name(volume) + try: + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + if not vid: + msg = (_('Can not find cinder volume - %(volumeName)s.') % + {"volumeName": volume_name}) + raise exception.VolumeBackendAPIException(msg) + + self.assign_iscsi_vdev(client_id, target_id, vid) + time.sleep(3) + except FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + if (err.code == 2415984989 and + "XML_ERROR_VIRTUAL_DEV_ASSIGNED_TO_iSCSI_TARGET" in + err.text): + ctxt.reraise = False + LOG.warning(_LW("Assign volume failed with message: %(msg)s."), + {"msg": err.reason}) + finally: + (lun, target_name) = self.FSS._get_iscsi_target_info(client_id, + vid) + iscsi_target_info['lun'] = lun + iscsi_target_info['iqn'] = target_name + + if not iscsi_target_info: + msg = _('Failed to get iSCSI target info for the LUN: %s') + raise 
exception.VolumeBackendAPIException(data=msg % volume_name) + return iscsi_target_info + + def terminate_connection_iscsi(self, volume, connector): + volume_name = self._get_fss_volume_name(volume) + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + client_id, target_id = self._get_iscsi_host(connector) + if not client_id: + msg = _('Can not find client id. The connection target name ' + 'is %s.') + raise exception.VolumeBackendAPIException( + data=msg % connector["initiator"]) + try: + self.unassign_vdev(client_id, vid) + except FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + if (err.code == 2415984988 and + "XML_ERROR_VIRTUAL_DEV_NOT_ASSIGNED_TO_iSCSI_TARGET" + in err.text): + ctxt.reraise = False + LOG.warning(_LW("Disconnection failed with message: " + "%(msg)s."), {"msg": err.reason}) + finally: + is_empty = self.FSS._check_host_mapping_status(client_id, + target_id) + + if is_empty: + self.FSS.delete_iscsi_target(target_id) + self.FSS.delete_client(client_id) + + def _get_existing_volume_ref_vid(self, existing_ref): + if 'source-id' in existing_ref: + vid = existing_ref['source-id'] + else: + reason = _("FSSISCSIDriver manage_existing requires vid to " + "identify an existing volume.") + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, reason=reason) + vdev_info = self.list_volume_info(vid) + if not vdev_info: + raise exception.ManageExistingInvalidReference( + existing_ref=existing_ref, + reason=_("Unable to find volume with FSS vid =%s.") % vid) + + if 'data' not in vdev_info: + msg = _('No vdev information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'sizemb' not in vdev_info['data']: + msg = _('No vdev sizemb in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + return vdev_info['data']['sizemb'] + + def _manage_existing_volume(self, vid, volume): + new_vol_name = self._get_fss_volume_name(volume) + 
try: + self.rename_vdev(vid, new_vol_name) + except FSSHTTPError as err: + with excutils.save_and_reraise_exception() as ctxt: + ctxt.reraise = False + LOG.warning(_LW("Volume manage_existing_volume was unable " + "to rename the volume, error message: %s."), + err.reason) + + def unmanage(self, volume): + volume_name = self._get_fss_volume_name(volume) + unmanaged_vol_name = volume_name + "-unmanaged" + try: + vid = self._get_fss_vid_from_name(volume_name, FSS_SINGLE_TYPE) + self.rename_vdev(vid, unmanaged_vol_name) + except FSSHTTPError as err: + LOG.warning(_LW("Volume unmanage was unable to rename the volume," + " error message: %(msg)s."), {"msg": err.reason}) + + +class FSSRestCommon(object): + def __init__(self, host, username, password, fss_debug): + self.hostip = host + self.username = username + self.password = password + self.session_id = None + self.fss_debug = fss_debug + + def _fss_request(self, method, path, data=None): + json_data = None + url = "http://%(ip)s/%(product)s/%(path)s" % { + "ip": self.hostip, "product": PRODUCT_NAME, "path": path} + headers = {"Content-Type": "application/json"} + if self.session_id is not None: + cookie = dict( + Cookie=SESSION_COOKIE_NAME + '=' + self.session_id + ) + headers.update(cookie) + + if data is not None: + request_body = json.dumps(data).encode("utf-8") + else: + request_body = None + + connection = http_client.HTTPConnection(self.hostip, 80, timeout=60) + + if self.fss_debug: + LOG.info(_LI("[FSS_RESTAPI]====%(method)s@url=%(url)s ====" + "@request_body=%(body)s===") % { + "method": method, + "url": url, + "body": request_body}) + + attempt = 1 + while True: + connection.request(method, url, request_body, headers) + response = connection.getresponse() + response_body = response.read() + if response_body: + try: + data = json.loads(response_body) + json_data = json.dumps(data) + json_data = json.loads(json_data.decode('utf8')) + except ValueError: + pass + + if self.fss_debug: + 
LOG.info(_LI("[FSS_RESTAPI]==@json_data: %s =="), json_data) + + if response.status == 200: + return json_data + elif response.status == 404: + msg = (_('FSS rest api return failed, method=%(method)s, ' + 'uri=%(url)s, response=%(response)s') % { + "method": method, + "url": url, + "response": response_body}) + raise exception.VolumeBackendAPIException(msg) + else: + err_code = json_data['rc'] + if (attempt > RETRY_CNT) or (str(err_code) not in RETRY_LIST): + err_target = ("method=%(method)s, url=%(url)s, " + "response=%(response)s" % + {"method": method, "url": url, + "response": response_body}) + err_response = self.get_fss_error_code(err_code) + err = dict( + code=err_code, + text=err_response['key'], + reason=err_response['message'] + ) + raise FSSHTTPError(err_target, err) + attempt += 1 + LOG.warning(_LW("Retry with rc: %s."), err_code) + self._random_sleep(RETRY_INTERVAL) + if err_code == 107: + self.fss_login() + + def _random_sleep(self, interval=60): + nsleep = random.randint(10, interval * 10) + value = round(float(nsleep) / 10, 2) + time.sleep(value) + + # + # REST API session management methods + # + def fss_login(self): + url = '%s/%s' % (FSS_AUTH, FSS_LOGIN) + params = dict( + username=self.username, + password=self.password, + server=self.hostip + ) + data = self._fss_request(POST, url, params) + if 'id' in data: + self.session_id = data['id'] + return self.session_id + + # + # Physical Adapters management methods + # + + def list_physicaladapter_info(self, adapter_id=None): + url = '%s/%s' % (FSS_PHYSICALRESOURCE, FSS_PHYSICALADAPTER) + if adapter_id is not None: + url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, + FSS_PHYSICALADAPTER, adapter_id) + return self._fss_request(GET, url) + + def list_fc_target_wwpn(self): + url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, FSS_PHYSICALADAPTER, + FSS_FC_TGT_WWPN) + tgt_wwpn = [] + output = self._fss_request(GET, url) + if output['data']: + tgt_wwpns = output['data'] + for tgt_alias_wwpn in tgt_wwpns: + 
tgt_wwpn.append( + str(tgt_alias_wwpn['aliaswwpn'].replace('-', ''))) + return tgt_wwpn + + def list_fc_client_initiators(self): + url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, FSS_PHYSICALADAPTER, + FSS_FCCLIENTINITIATORS) + return self._fss_request(GET, url) + + # + # storage pool management methods + # + + def list_pool_info(self, pool_id=None): + url = '%s/%s' % (FSS_PHYSICALRESOURCE, FSS_STORAGE_POOL) + if pool_id is not None: + url = '%s/%s/%s' % (FSS_PHYSICALRESOURCE, + FSS_STORAGE_POOL, pool_id) + return self._fss_request(GET, url) + + # + # Volume and snapshot management methods + # + + def create_vdev(self, params): + metadata = {} + url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN) + output = self._fss_request(POST, url, params) + if output: + if output['rc'] == 0: + metadata['FSS-vid'] = output['id'] + return metadata + + def _check_mirror_sync_finished(self, vid, timeout): + starttime = time.time() + while True: + self._random_sleep() + if time.time() > starttime + timeout: + msg = (_('FSS get mirror sync timeout on vid: %s ') % vid) + raise exception.VolumeBackendAPIException(data=msg) + elif self._check_mirror_sync_status(vid): + break + + def delete_vdev(self, vid): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid) + return self._fss_request(DELETE, url, dict(force=True)) + + def extend_vdev(self, vid, params): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid) + return self._fss_request(PUT, url, params) + + def rename_vdev(self, vid, params): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid) + return vid, self._fss_request(PUT, url, params) + + def list_fss_volume_info(self, vid=None): + url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN) + if vid is not None: + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SAN, vid) + return self._fss_request(GET, url) + + def _get_fss_vid_from_name(self, volume_name, fss_type=None): + vid = [] + output = self.list_fss_volume_info() + try: + if "virtualdevices" in output['data']: + for item in 
output['data']['virtualdevices']: + if item['name'] in volume_name: + vid.append(item['id']) + except Exception: + msg = (_('Can not find cinder volume - %s') % volume_name) + raise exception.VolumeBackendAPIException(msg) + + if fss_type is not None and fss_type == FSS_SINGLE_TYPE: + vid = ''.join(str(x) for x in vid) + return vid + + def _check_if_snapshot_tm_exist(self, vid): + snapshotenabled = False + timemarkenabled = False + sizemb = 0 + output = self.list_fss_volume_info(vid) + if "snapshotenabled" in output['data']: + snapshotenabled = output['data']['snapshotenabled'] + if "timemarkenabled" in output['data']: + timemarkenabled = output['data']['timemarkenabled'] + if "sizemb" in output['data']: + sizemb = output['data']['sizemb'] + return (snapshotenabled, timemarkenabled, sizemb) + + def create_vdev_snapshot(self, params): + url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_RESOURCE) + return self._fss_request(POST, url, params) + + def create_timemark_policy(self, vid, **kwargs): + url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, FSS_TIMEMARKPOLICY) + params = dict( + idlist=[vid], + automatic=dict(enabled=False), + maxtimemarkcount=MAXSNAPSHOTS + ) + if kwargs.get('storagepoolid'): + params.update(kwargs) + return self._fss_request(POST, url, params) + + def create_timemark(self, vid, snap_name): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, vid) + params = dict( + comment=snap_name, + priority='medium', + snapshotnotification=False + ) + return self._fss_request(POST, url, params) + + def get_timemark(self, vid): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, vid) + return self._fss_request(GET, url) + + def delete_timemark(self, timestamp): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, timestamp) + params = dict( + deleteallbefore=False + ) + return self._fss_request(DELETE, url, params) + + def delete_timemark_policy(self, vid): + url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, 
FSS_TIMEMARKPOLICY) + params = dict( + idlist=[vid] + ) + return self._fss_request(DELETE, url, params) + + def delete_vdev_snapshot(self, vid): + url = '%s/%s/%s' % (FSS_BATCH, FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_RESOURCE) + params = dict( + idlist=[vid] + ) + return self._fss_request(DELETE, url, params) + + def copy_timemark(self, timestamp, **kwargs): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEMARK, timestamp) + params = dict( + action='copy', + includetimeviewdata=False + ) + params.update(kwargs) + return self._fss_request(PUT, url, params) + + def get_timemark_copy_status(self, vid): + url = '%s/%s/%s?type=operationstatus' % ( + FSS_LOGICALRESOURCE, FSS_TIMEMARK, vid) + return self._fss_request(GET, url) + + def _check_tm_copy_status(self, vid): + finished = False + output = self.get_timemark_copy_status(vid) + if output['timemarkoperationstatus']: + timemark_status = output['timemarkoperationstatus'] + if timemark_status['operation'] == "copy": + if timemark_status['status'] == 'completed': + finished = True + return finished + + def _check_tm_copy_finished(self, vid, timeout): + finished = False + starttime = time.time() + while True: + self._random_sleep() + if time.time() > starttime + timeout: + msg = (_('FSS get timemark copy timeout on vid: %s') % vid) + raise exception.VolumeBackendAPIException(data=msg) + elif self._check_tm_copy_status(vid): + finished = True + return finished + + # + # TimeView methods + # + + def create_timeview(self, tv_vid, params): + vid = '' + volume_metadata = {} + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_TIMEVIEW, tv_vid) + output = self._fss_request(POST, url, params) + if output and output['rc'] == 0: + if output['copyid'] == -1: + vid = output['id'] + else: + vid = output['copyid'] + volume_metadata['FSS-vid'] = vid + return volume_metadata + + # + # Mirror methods + # + + def create_mirror(self, vid, pool_id): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_MIRROR, vid) + params = dict( + category='virtual', 
+ selectioncriteria='anydrive', + mirrortarget="virtual" + ) + params.update(pool_id) + return self._fss_request(POST, url, params) + + def get_mirror_sync_status(self, vid): + url = '%s/%s/%s?type=syncstatus' % ( + FSS_LOGICALRESOURCE, FSS_MIRROR, vid) + return self._fss_request(GET, url) + + def _check_mirror_sync_status(self, vid): + finished = False + output = self.get_mirror_sync_status(vid) + if output['mirrorsyncstatus']: + mirrorsyncstatus = output['mirrorsyncstatus'] + if mirrorsyncstatus['status'] == "insync": + if mirrorsyncstatus['percentage'] == 0: + finished = True + return finished + + def _set_mirror(self, vid, **kwargs): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_MIRROR, vid) + return self._fss_request(PUT, url, kwargs) + + def sync_mirror(self, vid): + return self._set_mirror(vid, action='sync') + + def promote_mirror(self, vid, new_volume_name): + return self._set_mirror(vid, action='promote', name=new_volume_name) + + # + # Host management methods + # + + def get_server_options(self): + url = '%s/%s' % (FSS_SERVER, FSS_OPTIONS) + return self._fss_request(GET, url) + + def set_server_options(self, action): + url = '%s/%s' % (FSS_SERVER, FSS_OPTIONS) + params = dict( + action=action, + enabled=True + ) + return self._fss_request(PUT, url, params) + + def get_server_name(self): + url = '%s/%s' % (FSS_SERVER, FSS_OPTIONS) + return self._fss_request(GET, url) + + # + # SAN Client management methods + # + + def list_client_initiators(self): + url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, + FSS_ISCSI_CLIENT_INITIATORS) + return self._fss_request(GET, url) + + def get_default_portal(self): + url = '%s/%s/%s' % (FSS_SERVER, FSS_OPTIONS, FSS_PORTAL) + return self._fss_request(GET, url) + + def create_client(self, params): + url = '%s/%s' % (FSS_CLIENT, FSS_SANCLIENT) + return self._fss_request(POST, url, params) + + def list_sanclient_info(self, client_id=None): + url = '%s/%s' % (FSS_CLIENT, FSS_SANCLIENT) + if client_id is not None: + url = 
'%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, + client_id) + return self._fss_request(GET, url) + + def assign_vdev(self, client_id, params): + url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, client_id) + return self._fss_request(PUT, url, params) + + def unassign_vdev(self, client_id, params): + url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, client_id) + return self._fss_request(PUT, url, params) + + def _get_iscsi_target_info(self, client_id, vid): + lun = 0 + target_name = None + output = self.list_sanclient_info(client_id) + + if 'data' not in output: + msg = _('No target information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'iscsidevices' not in output['data']: + msg = _('No iscsidevices information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + for iscsidevices in output['data']['iscsidevices']: + if int(vid) == int(iscsidevices['id']): + lun = iscsidevices['lun'] + iscsitarget_info = iscsidevices['iscsitarget'] + for key, value in iscsitarget_info.items(): + if key == 'name': + target_name = value + + return lun, target_name + + def _check_host_mapping_status(self, client_id, target_id): + is_empty = False + hosting_cnt = 0 + output = self.list_sanclient_info(client_id) + if 'data' not in output: + msg = _('No target in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'iscsidevices' not in output['data']: + msg = _('No iscsidevices information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + if len(output['data']['iscsidevices']) == 0: + is_empty = True + else: + for iscsidevices in output['data']['iscsidevices']: + iscsitarget_info = iscsidevices['iscsitarget'] + for key, value in iscsitarget_info.items(): + if key == 'id' and target_id == value: + hosting_cnt += 1 + + if hosting_cnt == 0: + is_empty = True + return is_empty + + def list_iscsi_target_info(self, target_id=None): + url = 
'%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET) + if target_id is not None: + url = '%s/%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET, + target_id) + return self._fss_request(GET, url) + + def _get_iscsi_target_id(self, initiator_iqn): + target_id = '' + client_id = '' + output = self.list_iscsi_target_info() + + if 'data' not in output: + msg = _('No target in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'iscsitargets' not in output['data']: + msg = _('No iscsitargets for target.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + for targets in output['data']['iscsitargets']: + if 'name' in targets: + if initiator_iqn in targets['name']: + target_id = str(targets['id']) + client_id = str(targets['clientid']) + break + return target_id, client_id + + def create_iscsitarget(self, params): + url = '%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET) + return self._fss_request(POST, url, params) + + def delete_iscsi_target(self, target_id): + url = '%s/%s/%s' % (FSS_CLIENT, FSS_ISCSI_TARGET, target_id) + params = dict( + force=True + ) + return self._fss_request(DELETE, url, params) + + def delete_client(self, client_id): + url = '%s/%s/%s' % (FSS_CLIENT, FSS_SANCLIENT, client_id) + return self._fss_request(DELETE, url) + + def _get_fc_client_info(self, client_id, vid): + lun = 0 + output = self.list_sanclient_info(client_id) + if 'data' not in output: + msg = _('No target information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + if 'fcdevices' not in output['data']: + msg = _('No fcdevices information in given data.') + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + for fcdevices in output['data']['fcdevices']: + if int(vid) == int(fcdevices['id']): + lun = fcdevices['lun'] + + return lun + + # + # Group related methods + # + + def create_group(self, params): + url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP) + return self._fss_request(POST, 
url, params) + + def list_group_info(self, gid=None): + if gid is not None: + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid) + else: + url = '%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP) + return self._fss_request(GET, url) + + def set_group(self, gid, join_params=None, leave_params=None): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid) + if join_params: + self._fss_request(PUT, url, join_params) + if leave_params: + self._fss_request(PUT, url, leave_params) + + def create_group_timemark_policy(self, gid, params): + url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_GROUP, FSS_TIMEMARKPOLICY, gid) + return self._fss_request(POST, url, params) + + def _check_if_group_tm_enabled(self, gid): + timemarkenabled = False + output = self.list_group_info(gid) + if "timemarkenabled" in output['data']: + timemarkenabled = output['data']['timemarkenabled'] + return timemarkenabled + + def create_group_timemark(self, gid, params): + url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_GROUP, FSS_TIMEMARK, gid) + return self._fss_request(POST, url, params) + + def get_group_timemark(self, gid): + url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_GROUP, FSS_TIMEMARK, gid) + return self._fss_request(GET, url) + + def delete_group_timemark(self, timestamp, params): + url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_GROUP, FSS_TIMEMARK, timestamp) + return self._fss_request(DELETE, url, params) + + def delete_group_timemark_policy(self, gid): + url = '%s/%s/%s/%s' % (FSS_LOGICALRESOURCE, + FSS_SNAPSHOT_GROUP, FSS_TIMEMARKPOLICY, gid) + return self._fss_request(DELETE, url) + + def delete_snapshot_group(self, gid): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid) + return self._fss_request(DELETE, url) + + def destroy_group(self, gid): + url = '%s/%s/%s' % (FSS_LOGICALRESOURCE, FSS_SNAPSHOT_GROUP, gid) + return self._fss_request(DELETE, url) + + def get_fss_error_code(self, err_id): + 
try: + url = '%s/%s/%s' % (FSS_SERVER, FSS_RETURN_CODE, err_id) + output = self._fss_request(GET, url) + if output['rc'] == 0: + return output + except Exception: + msg = (_('Can not find this error code:%s.') % err_id) + raise exception.APIException(reason=msg) + + +class FSSHTTPError(Exception): + + def __init__(self, target, response): + super(FSSHTTPError, self).__init__() + self.target = target + self.code = response['code'] + self.text = response['text'] + self.reason = response['reason'] + + def __str__(self): + msg = ("FSSHTTPError code {0} returned by REST at {1}: {2}\n{3}") + return msg.format(self.code, self.target, + self.reason, self.text) diff --git a/releasenotes/notes/falconstor-cinder-driver-dcb61441cd7601c5.yaml b/releasenotes/notes/falconstor-cinder-driver-dcb61441cd7601c5.yaml new file mode 100644 index 00000000000..595889209ec --- /dev/null +++ b/releasenotes/notes/falconstor-cinder-driver-dcb61441cd7601c5.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added backend driver for FalconStor FreeStor. +