From 2902da9c58fb531a719036583885f8894ae6ac2d Mon Sep 17 00:00:00 2001 From: Sandeep Pawar Date: Mon, 28 Nov 2016 11:54:00 +0530 Subject: [PATCH] Implementation of cinder driver for Veritas HyperScale This is an implementation of a Cinder driver for Veritas HyperScale which is a high performance block storage provider for OpenStack. This implementation features all necessary Cinder functionality including volume attach, detach and snapshot management. DocImpact Change-Id: Ie1af5f5d54b0115974a4024a1756e4e0aa07399a Implements: blueprint veritas-hyperscale-cinder-driver --- cinder/exception.py | 26 + .../volume/drivers/test_vrtshyperscale.py | 521 +++++++++ cinder/volume/drivers/veritas/__init__.py | 0 cinder/volume/drivers/veritas/hs_constants.py | 56 + cinder/volume/drivers/veritas/utils.py | 377 +++++++ .../volume/drivers/veritas/vrtshyperscale.py | 1001 +++++++++++++++++ etc/cinder/rootwrap.d/volume.filters | 3 + ...ts_hyperscale_driver-5b63ab706ea8ae89.yaml | 3 + 8 files changed, 1987 insertions(+) create mode 100644 cinder/tests/unit/volume/drivers/test_vrtshyperscale.py create mode 100644 cinder/volume/drivers/veritas/__init__.py create mode 100644 cinder/volume/drivers/veritas/hs_constants.py create mode 100644 cinder/volume/drivers/veritas/utils.py create mode 100644 cinder/volume/drivers/veritas/vrtshyperscale.py create mode 100644 releasenotes/notes/vrts_hyperscale_driver-5b63ab706ea8ae89.yaml diff --git a/cinder/exception.py b/cinder/exception.py index 1403179eb55..dbd9b380204 100644 --- a/cinder/exception.py +++ b/cinder/exception.py @@ -1363,3 +1363,29 @@ class AttachmentSpecsNotFound(NotFound): class InvalidAttachment(Invalid): message = _("Invalid attachment: %(reason)s") + + +# Veritas driver +class UnableToExecuteHyperScaleCmd(VolumeDriverException): + message = _("Failed HyperScale command for '%(message)s'") + + +class UnableToProcessHyperScaleCmdOutput(VolumeDriverException): + message = _("Failed processing command output '%(cmd_out)s'" + " for 
HyperScale command") + + +class ErrorInFetchingConfiguration(VolumeDriverException): + message = _("Error in fetching configuration for '%(persona)s'") + + +class ErrorInSendingMsg(VolumeDriverException): + message = _("Error in sending message '%(cmd_error)s'") + + +class ErrorInHyperScaleVersion(VolumeDriverException): + message = _("Error in getting HyperScale version '%(cmd_error)s'") + + +class ErrorInParsingArguments(VolumeDriverException): + message = _("Error in parsing message arguments : Invalid Payload") diff --git a/cinder/tests/unit/volume/drivers/test_vrtshyperscale.py b/cinder/tests/unit/volume/drivers/test_vrtshyperscale.py new file mode 100644 index 00000000000..1ed917b60ec --- /dev/null +++ b/cinder/tests/unit/volume/drivers/test_vrtshyperscale.py @@ -0,0 +1,521 @@ +# Copyright (c) 2017 Veritas Technologies LLC. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from cinder import context +from cinder import exception +from cinder import test +from cinder.tests.unit import fake_snapshot +from cinder.tests.unit import fake_volume +from cinder.volume import configuration as conf +from cinder.volume.drivers.veritas import vrtshyperscale as vrts + + +class FakeDb(object): + def volume_metadata_get(self, *a, **kw): + return {} + + def volume_metadata_update(self, *a, **kw): + return None + + +def _stub_volume(*args, **kwargs): + updates = {'provider_location': 'hyperscale-sv:/hyperscale'} + return fake_volume.fake_db_volume(**updates) + + +def _stub_snapshot(*args, **kwargs): + updates = {'volume': _stub_volume(), 'name': 'vrts'} + return fake_snapshot.fake_db_snapshot(**updates) + + +def _stub_stats(): + data = {} + data["volume_backend_name"] = 'Veritas_HyperScale' + data["vendor_name"] = 'Veritas Technologies LLC' + data["driver_version"] = '1.0' + data["storage_protocol"] = 'nfs' + data['total_capacity_gb'] = 0.0 + data['free_capacity_gb'] = 0.0 + data['reserved_percentage'] = 0 + data['QoS_support'] = False + return data + + +class VRTSHyperScaleDriverTestCase(test.TestCase): + """Test case for Veritas HyperScale VolumeDriver.""" + + driver_name = "cinder.volume.drivers.veritas.vrtshyperscale" + + @staticmethod + def gvmv_side_effect(arg1, arg2): + """Mock side effect for _get_volume_metadata_value.""" + # mock the return of get_volume_metadata_value + # for different arguments + if arg2 == 'Secondary_datanode_key': + return '{9876}' + elif arg2 == 'Secondary_datanode_ip': + return '192.0.2.2' + elif arg2 == 'current_dn_ip': + return '192.0.2.1' + elif arg2 == 'vsa_ip': + return '192.0.2.1' + + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._fetch_config_for_compute') + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._fetch_config_for_datanode') + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + 
'._fetch_config_for_controller') + def setUp(self, mock_fcfcntr, mock_fcfd, mock_fcfc): + mock_fcfcntr.return_value = None + mock_fcfd.return_value = None + mock_fcfc.return_value = None + + # Initialise a test setup + super(VRTSHyperScaleDriverTestCase, self).setUp() + + self.configuration = mock.Mock(conf.Configuration(None)) + self.configuration.reserved_percentage = 0 + self.context = context.get_admin_context() + self.driver = vrts.HyperScaleDriver( + db=FakeDb(), configuration=self.configuration) + self.driver.dn_routing_key = '{1234}' + self.driver.datanode_ip = '192.0.2.1' + self.volume = _stub_volume() + self.snapshot = _stub_snapshot() + + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_volume_metadata') + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_replicas') + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_volume_details_for_create_volume') + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + @mock.patch('cinder.volume.api.API.update_volume_metadata') + def test_create_volume_single_replicas(self, mock_uvm, mock_mdp, + mock_gvdfcv, mock_get_replicas, + mock_gvm): + """Test single volume replica. 
Happy path test case.""" + # Mock volume metadata + mock_gvm.return_value = _stub_volume() + + # Mock number of replicas to 1 + mock_get_replicas.return_value = 1 + # assume volume details are populated correctly + mock_gvdfcv.return_value = _stub_volume() + + # assume volume message is sent to data node successfully + mock_mdp.return_value = ("", None) + # assume that the volume metadata gets updated correctly + mock_uvm.return_value = {} + + # declare the expected result + expected_result = { + 'provider_location': 'hyperscale-sv:/hyperscale', + 'metadata': mock_gvm.return_value + } + + # call create volume and get the result + actual_result = self.driver.create_volume(self.volume) + + # Test if the return value matched the expected results + self.assertDictEqual(actual_result, expected_result) + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.get_hyperscale_version') + def test_check_for_setup_error(self, mock_ghv): + """Test check for setup errors in Veritas HyperScale driver. + + The test case checks happy path execution when driver version 1.0.0 + is installed. + """ + mock_ghv.return_value = "1.0.0" + + # check the driver for setup errors + self.driver.check_for_setup_error() + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.get_hyperscale_version') + def test_check_for_setup_error_unsupported_version(self, mock_ghv): + """Test check for setup errors in Veritas HyperScale driver. + + The test case checks that an exception is raised when an unsupported + driver version is installed. + """ + mock_ghv.return_value = "1.0.0.1" + + # check the driver for setup errors + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.check_for_setup_error) + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.get_hyperscale_version') + def test_check_for_setup_error_exception(self, mock_ghv): + """Test check for setup errors in Veritas HyperScale driver. + + The test case checks that an exception is raised when fetching the + HyperScale version fails. 
+ """ + mock_ghv.side_effect = exception.ErrorInHyperScaleVersion( + cmd_error="mock error") + + # check the driver for setup errors + self.assertRaises(exception.VolumeBackendAPIException, + self.driver.check_for_setup_error) + + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_volume_metadata_value') + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + def test_delete_volume_no_replica(self, mock_mdp, mock_gvmv): + """Test happy path for delete_volume one data nodes.""" + mock_gvmv.return_value = None + self.driver.delete_volume(self.volume) + + message_body = {'display_name': self.volume['name']} + + mock_mdp.assert_called_with(self.driver.dn_routing_key, + 'hyperscale.storage.dm.volume.delete', + **message_body) + + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_volume_metadata_value') + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + def test_delete_volume_more_than_one_replica(self, mock_mdp, mock_gvmv): + """Test happy path for delete_volume with more than one data nodes.""" + mock_gvmv.side_effect = VRTSHyperScaleDriverTestCase.gvmv_side_effect + + message_body = {'display_name': self.volume['name']} + + # make the delete call + self.driver.delete_volume(self.volume) + + # check if delete volume sent to reflection target on data node + # check if mq message sent with 'Secondary_datanode_key' + mock_mdp.assert_any_call( + '{9876}', 'hyperscale.storage.dm.volume.delete', **message_body) + + # check if the delete is sent to primary data node as well + mock_mdp.assert_any_call(self.driver.dn_routing_key, + 'hyperscale.storage.dm.volume.delete', + **message_body) + + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_volume_metadata_value') + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + def test_delete_volume_no_replica_failure(self, mock_mdp, mock_gvmv): + """Failure 
case for delete_volume one node in data plane.""" + mock_gvmv.side_effect = None + self.driver.delete_volume(self.volume) + mock_mdp.side_effect = exception.UnableToProcessHyperScaleCmdOutput( + cmd_out='mock error') + self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, + self.volume) + + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_volume_metadata_value') + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + def test_delete_volume_more_than_one_replica_failure(self, mock_mdp, + mock_gvmv): + """failure case for delete_volume with more than one data nodes.""" + mock_gvmv.side_effect = VRTSHyperScaleDriverTestCase.gvmv_side_effect + + mock_mdp.side_effect = exception.UnableToProcessHyperScaleCmdOutput( + cmd_out='mock error') + + self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, + self.volume) + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.get_guid_with_curly_brackets') + def test_delete_snapshot_force_flag(self, mock_ggwcb): + """Test snapshot deletion does not happen if force flag is set.""" + # get a mock snapshot object + snapshot = fake_snapshot.fake_db_snapshot() + # set the force in metadata of snapshot + snapshot['metadata'] = {"force": "force"} + + # call the delete volume + self.driver.delete_snapshot(snapshot) + + # if snapshot has force set in metadata then + # get_guid_with_curly_brackets() will not be called because we + # return as soon as we see force + mock_ggwcb.assert_not_called() + + def test_delete_snapshot_isbusy_flag(self): + """Test snapshot deletion throws exception if snapshot is busy.""" + # get a mock snapshot object + snapshot = fake_snapshot.fake_db_snapshot() + # set the force in metadata of snapshot + snapshot['metadata'] = {"is_busy": "is_busy"} + + # call the delete volume to check if it raises Busy Exception + self.assertRaises(exception.SnapshotIsBusy, + self.driver.delete_snapshot, snapshot) + + 
@mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_volume_metadata') + @mock.patch('cinder.volume.api.API.get_volume') + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + def test_delete_snapshot_from_primary_dn(self, mock_mdp, mock_gv, + mock_gvm): + """Test snapshot deletion from primary DN.""" + # get mock volume + mock_gv.return_value = None + mock_gvm.return_value = {'current_dn_ip': self.driver.datanode_ip} + + message_body = {} + message_body['volume_guid'] = '{' + self.volume['id'] + '}' + message_body['snapshot_id'] = '{' + self.snapshot['id'] + '}' + + # call delete snapshot + self.driver.delete_snapshot(self.snapshot) + + # assert msg is sent over mq with primary DN routing key + mock_mdp.assert_called_with(self.driver.dn_routing_key, + 'hyperscale.storage.dm.version.delete', + **message_body) + + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_volume_metadata') + @mock.patch('cinder.volume.api.API.get_volume') + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_volume_metadata_value') + def test_delete_snapshot_from_current_dn(self, mock_gvmv, mock_mdp, + mock_gv, mock_gvm): + """Test snapshot deletion DN value from volume.""" + # get a mock volume + mock_gv.return_value = _stub_volume() + + # get a mock value of DN from volume + mock_gvmv.return_value = '{9876}' + + message_body = {} + message_body['volume_guid'] = '{' + self.volume['id'] + '}' + message_body['snapshot_id'] = '{' + self.snapshot['id'] + '}' + + # call delete snapshot + self.driver.delete_snapshot(self.snapshot) + + # assert msg is sent over mq with key from volume's current_dn_owner + mock_mdp.assert_called_with( + '{9876}', 'hyperscale.storage.dm.version.delete', **message_body) + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + def 
test_fetch_volume_stats_failure(self, mock_mdp): + """Test case checking failure of pool for fetching stats.""" + # since we have initialised the pool to None in setup() + # the function will return only the stub without populating + # any free and used stats + mock_obj = {'payload': {}} + + mock_mdp.return_value = (mock_obj, None) + self.assertDictEqual(_stub_stats(), self.driver._fetch_volume_status()) + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + def test_create_cloned_volume_with_exception(self, mock_mdp): + """Test case throws exception when command failed to execute.""" + vol_a = _stub_volume() + vol_b = _stub_volume() + mock_mdp.side_effect = exception.UnableToExecuteHyperScaleCmd( + cmd_out='mock error') + self.assertRaises(exception.UnableToExecuteHyperScaleCmd, + self.driver.create_cloned_volume, vol_b, vol_a) + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale' + '.HyperScaleDriver._select_rt') + def test_create_cloned_volume_with_no_replica(self, mock_srt, mock_mdp): + """Test case clone volume when there is no replica.""" + mock_obj = {'payload': {}} + mock_mdp.return_value = (mock_obj, None) + mock_srt.return_value = (None, None) + vol_a = _stub_volume() + vol_b = _stub_volume() + self.assertDictContainsSubset({ + 'provider_location': 'hyperscale-sv:/hyperscale' + }, self.driver.create_cloned_volume(vol_b, vol_a)) + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale' + '.HyperScaleDriver._select_rt') + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_volume_metadata_value') + def test_create_cloned_volume_with_replica(self, mock_gvmv, mock_srt, + mock_mdp): + """Test case clone volume when there is replica.""" + mock_gvmv.side_effect = VRTSHyperScaleDriverTestCase.gvmv_side_effect + mock_obj = {'payload': {}} + 
 mock_mdp.return_value = (mock_obj, None) + mock_srt.return_value = ('{1234}', '192.0.2.2') + vol_a = _stub_volume() + vol_b = _stub_volume() + metadata = { + 'current_dn_owner': '{1234}', + 'Potential_secondary_key': '{1234}', + 'Primary_datanode_ip': '192.0.2.1', + 'Potential_secondary_ip': '192.0.2.2', + 'current_dn_ip': '192.0.2.1', + 'source_volid': vol_a['id'], + 'size': vol_a['size'] + } + self.assertDictContainsSubset({ + 'provider_location': 'hyperscale-sv:/hyperscale', + 'metadata': metadata + }, self.driver.create_cloned_volume(vol_b, vol_a)) + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + def test_extend_volume_with_exception(self, mock_mdp): + """Test case extend volume throws exception.""" + mock_mdp.side_effect = exception.UnableToProcessHyperScaleCmdOutput( + cmd_out='mock error') + self.assertRaises(exception.VolumeDriverException, + self.driver.extend_volume, _stub_volume(), 256) + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + def test_extend_volume_no_exception(self, mock_mdp): + """Test case extend volume to the given size in GB.""" + mock_mdp.return_value = (None, None) + self.driver.extend_volume(_stub_volume(), 256) + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + def test_create_volume_from_snapshot_with_exception(self, mock_mdp): + """Test case create volume from snapshot throws exception.""" + fake_volume, fake_snapshot = _stub_volume(), _stub_snapshot() + mock_mdp.side_effect = exception.UnableToExecuteHyperScaleCmd( + cmd_out='mock error') + self.assertRaises(exception.UnableToExecuteHyperScaleCmd, + self.driver.create_volume_from_snapshot, fake_volume, + fake_snapshot) + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale' + '.HyperScaleDriver._select_rt') + def test_create_volume_from_snapshot_with_no_replica(self, mock_srt, + mock_mdp): + """Test case 
create volume from snapshot when there is no replica.""" + mock_obj = {'payload': {}} + mock_mdp.return_value = (mock_obj, None) + mock_srt.return_value = (None, None) + fake_volume, fake_snapshot = _stub_volume(), _stub_snapshot() + self.assertDictContainsSubset({ + 'provider_location': 'hyperscale-sv:/hyperscale' + }, self.driver.create_volume_from_snapshot(fake_volume, fake_snapshot)) + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale' + '.HyperScaleDriver._select_rt') + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_volume_metadata_value') + def test_create_volume_from_snapshot_with_replica(self, mock_gvmv, + mock_srt, mock_mdp): + """Test case create volume from snapshot when there is replica.""" + mock_gvmv.side_effect = VRTSHyperScaleDriverTestCase.gvmv_side_effect + mock_obj = {'payload': {}} + mock_mdp.return_value = (mock_obj, None) + mock_srt.return_value = ('{1234}', '192.0.2.2') + fake_volume, fake_snapshot = _stub_volume(), _stub_snapshot() + metadata = { + 'current_dn_owner': '{1234}', + 'Potential_secondary_key': '{1234}', + 'Primary_datanode_ip': '192.0.2.1', + 'Potential_secondary_ip': '192.0.2.2', + 'current_dn_ip': '192.0.2.1', + 'snapshot_id': fake_snapshot['id'], + 'parent_volume_guid': '{' + fake_snapshot['volume']['id'] + '}' + } + self.assertDictContainsSubset({ + 'provider_location': 'hyperscale-sv:/hyperscale', + 'metadata': metadata + }, self.driver.create_volume_from_snapshot(fake_volume, fake_snapshot)) + + def test_initialize_connection(self): + """Test case initialize_connection.""" + fake_volume = _stub_volume() + expected_data = { + 'driver_volume_type': 'veritas_hyperscale', + 'data': { + 'export': fake_volume['provider_location'], + 'name': fake_volume['name'] + } + } + self.assertEqual(expected_data, + self.driver.initialize_connection(fake_volume, None)) + 
@mock.patch('cinder.volume.drivers.veritas.utils' + '.message_compute_plane') + @mock.patch('cinder.volume.drivers.veritas.utils' + '.episodic_snap') + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_volume_metadata_value') + def test_create_snapshot_with_exception( + self, mock_gvmv, mock_es, mock_mcp): + """Test case create snapshot throws exception.""" + mock_gvmv.side_effect = VRTSHyperScaleDriverTestCase.gvmv_side_effect + mock_es_obj = {'payload': {'update': False}} + mock_es.return_value = mock_es_obj + mock_mcp.side_effect = exception.UnableToExecuteHyperScaleCmd( + cmd_out='mock error') + fake_snapshot = _stub_snapshot() + self.assertRaises(exception.UnableToExecuteHyperScaleCmd, + self.driver.create_snapshot, fake_snapshot) + + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_controller') + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_data_plane') + @mock.patch('cinder.volume.drivers.veritas.utils' + '.episodic_snap') + @mock.patch('cinder.volume.drivers.veritas.vrtshyperscale.HyperScaleDriver' + '._get_volume_metadata_value') + @mock.patch('cinder.volume.drivers.veritas.utils' + '.message_compute_plane') + def test_create_snapshot_user( + self, mock_cdp, mock_gvmv, mock_es, mock_mdp, mock_mc): + """Test case user snapshot.""" + mock_gvmv.side_effect = VRTSHyperScaleDriverTestCase.gvmv_side_effect + mock_es_obj = {'payload': {'update': False}} + mock_es.return_value = mock_es_obj + mock_obj = {'payload': {}} + mock_mdp.return_value = ("", None) + mock_mc.return_value = ("", None) + mock_cdp.return_value = (mock_obj, None) + fake_snapshot = _stub_snapshot() + expected = { + 'metadata': { + 'status': 'creating', + 'datanode_ip': '192.0.2.1', + 'TYPE': vrts.TYPE_USER_SNAP + } + } + self.assertEqual(expected, self.driver.create_snapshot(fake_snapshot)) diff --git a/cinder/volume/drivers/veritas/__init__.py b/cinder/volume/drivers/veritas/__init__.py new file mode 100644 index 
00000000000..e69de29bb2d diff --git a/cinder/volume/drivers/veritas/hs_constants.py b/cinder/volume/drivers/veritas/hs_constants.py new file mode 100644 index 00000000000..cf3c330b94d --- /dev/null +++ b/cinder/volume/drivers/veritas/hs_constants.py @@ -0,0 +1,56 @@ +# Copyright (c) 2017 Veritas Technologies LLC. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Error Codes +""" + +EX_BAD_PARAM = 10 +EX_BAD_MESSAGE = 106 +MSG_SUCCESS = 0 +MSG_ERROR = 1 + +""" +Constants +""" +HS_VHOST = "/" +ACK_YES = 1 +ACK_NO = 0 +BLK_YES = 1 +BLK_NO = 0 +EXCH_DIRECT = "direct" +EXCH_FANOUT = "fanout" +EXCH_TOPIC = "topic" + +MSG_REQUEST = 1 +MSG_RESPONSE = 2 +MSG_TOKEN = "token" +MSG_OWNER = "owner" +MSG_TYPE = "type" +MSG_ERROR = "err_code" +MSG_ACK = "ack" +MSG_BLK = "blocking" +MSG_BLK_INFO = "blocking_info" +MSG_BLK_NAME = "name" +MSG_BLK_BINDKEY = "bindkey" +MSG_BLK_TYPE = "type" +MSG_PAYLOAD = "payload" + +# HyperScale Controller Exchange +HS_CONTROLLER_EXCH = 'hyperscale-controller' +HS_RPC_EXCH = 'hyperscale-recv' +HS_DATANODE_EXCH = 'hyperscale-datanode' +HS_COMPUTE_EXCH = 'hyperscale-storage' + +SNAP_RESTORE_RF = 3 diff --git a/cinder/volume/drivers/veritas/utils.py b/cinder/volume/drivers/veritas/utils.py new file mode 100644 index 00000000000..df89959d712 --- /dev/null +++ b/cinder/volume/drivers/veritas/utils.py @@ -0,0 +1,377 @@ +# Copyright (c) 2017 Veritas Technologies LLC. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import uuid + +from oslo_concurrency import processutils as putils +from oslo_log import log as logging +from oslo_utils import excutils +import six + +from cinder import exception +from cinder import utils +from cinder.volume.drivers.veritas import hs_constants as constants + +LOG = logging.getLogger(__name__) + + +def _populate_message_body(kwargs): + message_body = {} + # Build message body from kwargs + for key, value in kwargs.items(): + if value is not None: + message_body[key] = value + + return message_body + + +def generate_routingkey(): + return six.text_type(uuid.uuid1()) + + +def get_guid_with_curly_brackets(guid): + return "{%s}" % guid if guid else guid + + +def get_hyperscale_image_id(): + return "{%s}" % uuid.uuid1() + + +def get_hyperscale_version(): + + version = None + cmd_err = None + try: + cmd_arg = {'operation': 'version'} + # create a json for cmd argument + cmdarg_json = json.dumps(cmd_arg) + + # call hscli for version + (cmd_out, cmd_err) = hsexecute(cmdarg_json) + + # cmd_err should be None in case of successful execution of cmd + if not cmd_err: + processed_output = process_cmd_out(cmd_out) + version = processed_output.get('payload') + else: + LOG.error("Error %s in getting hyperscale version", + cmd_err) + raise exception.ErrorInHyperScaleVersion(cmd_err=cmd_err) + except (exception.UnableToExecuteHyperScaleCmd, + exception.UnableToProcessHyperScaleCmdOutput): + 
LOG.error("Exception in running the command for version", + exc_info=True) + raise exception.UnableToExecuteHyperScaleCmd(message="version") + + return version + + +def get_datanode_id(): + + dnid = None + cmd_out = None + cmd_err = None + try: + cmd_arg = {'operation': 'get_datanode_id'} + # create a json for cmd argument + cmdarg_json = json.dumps(cmd_arg) + + # call hscli for get_datanode_id + (cmd_out, cmd_err) = hsexecute(cmdarg_json) + + # cmd_err should be None in case of successful execution of cmd + if not cmd_err: + processed_output = process_cmd_out(cmd_out) + dnid = processed_output.get('payload') + else: + LOG.error("Error %s in getting datanode hypervisor id", + cmd_err) + raise exception.UnableToExecuteHyperScaleCmd( + message=cmdarg_json) + except exception.UnableToExecuteHyperScaleCmd: + with excutils.save_and_reraise_exception(): + LOG.debug("Unable to execute get_datanode_id", exc_info=True) + + except exception.UnableToProcessHyperScaleCmdOutput: + with excutils.save_and_reraise_exception(): + LOG.debug("Unable to process get_datanode_id output", + exc_info=True) + return dnid + + +def episodic_snap(meta): + + cmd_out = None + cmd_err = None + out_meta = None + try: + cmd_arg = {} + cmd_arg['operation'] = 'episodic_snap' + cmd_arg['metadata'] = meta + # create a json for cmd argument + cmdarg_json = json.dumps(cmd_arg) + + # call hscli for episodic_snap + (cmd_out, cmd_err) = hsexecute(cmdarg_json) + + # cmd_err should be None in case of successful execution of cmd + if not cmd_err: + processed_output = process_cmd_out(cmd_out) + out_meta = processed_output.get('payload') + else: + LOG.error("Error %s in processing episodic_snap", + cmd_err) + raise exception.UnableToExecuteHyperScaleCmd( + message=cmdarg_json) + except exception.UnableToExecuteHyperScaleCmd: + with excutils.save_and_reraise_exception(): + LOG.debug("Unable to execute episodic_snap", exc_info=True) + + except exception.UnableToProcessHyperScaleCmdOutput: + with 
excutils.save_and_reraise_exception(): + LOG.debug("Unable to process episodic_snap output", + exc_info=True) + return out_meta + + +def get_image_path(image_id, op_type='image'): + + cmd_out = None + cmd_err = None + image_path = None + try: + cmd_arg = {} + if op_type == 'image': + cmd_arg['operation'] = 'get_image_path' + elif op_type == 'volume': + cmd_arg['operation'] = 'get_volume_path' + cmd_arg['image_id'] = image_id + # create a json for cmd argument + cmdarg_json = json.dumps(cmd_arg) + + # call hscli for get_image_path + (cmd_out, cmd_err) = hsexecute(cmdarg_json) + + # cmd_err should be None in case of successful execution of cmd + if not cmd_err: + processed_output = process_cmd_out(cmd_out) + image_path = processed_output.get('payload') + else: + LOG.error("Error %s in processing get_image_path", + cmd_err) + raise exception.UnableToExecuteHyperScaleCmd( + message=cmdarg_json) + except exception.UnableToExecuteHyperScaleCmd: + with excutils.save_and_reraise_exception(): + LOG.debug("Unable to execute get_image_path", exc_info=True) + + except exception.UnableToProcessHyperScaleCmdOutput: + with excutils.save_and_reraise_exception(): + LOG.debug("Unable to process get_image_path output", + exc_info=True) + return image_path + + +def update_image(image_path, volume_id, hs_img_id): + cmd_out = None + cmd_err = None + output = None + try: + cmd_arg = {} + cmd_arg['operation'] = 'update_image' + cmd_arg['image_path'] = image_path + cmd_arg['volume_id'] = volume_id + cmd_arg['hs_image_id'] = hs_img_id + # create a json for cmd argument + cmdarg_json = json.dumps(cmd_arg) + + (cmd_out, cmd_err) = hsexecute(cmdarg_json) + + # cmd_err should be None in case of successful execution of cmd + if not cmd_err: + output = process_cmd_out(cmd_out) + else: + LOG.error("Error %s in execution of update_image", + cmd_err) + raise exception.UnableToExecuteHyperScaleCmd( + message=cmdarg_json) + except exception.UnableToExecuteHyperScaleCmd: + with 
excutils.save_and_reraise_exception(): + LOG.debug("Unable to execute update_image", exc_info=True) + + except exception.UnableToProcessHyperScaleCmdOutput: + with excutils.save_and_reraise_exception(): + LOG.debug("Unable to process update_image output", + exc_info=True) + return output + + +def hsexecute(cmdarg_json): + + cmd_out = None + cmd_err = None + try: + # call hyperscale cli + (cmd_out, cmd_err) = utils.execute("hscli", + cmdarg_json, + run_as_root=True) + except (putils.UnknownArgumentError, putils.ProcessExecutionError, + exception.ErrorInParsingArguments, OSError): + LOG.error("Exception in running the command for %s", + cmdarg_json, + exc_info=True) + raise exception.UnableToExecuteHyperScaleCmd(message=cmdarg_json) + + except Exception: + LOG.error("Internal exception in cmd for %s", cmdarg_json, + exc_info=True) + raise exception.UnableToExecuteHyperScaleCmd(message=cmdarg_json) + + return (cmd_out, cmd_err) + + +def process_cmd_out(cmd_out): + """Process the cmd output.""" + + output = None + + try: + # get the python object from the cmd_out + output = json.loads(cmd_out) + error_code = output.get('err_code') + if error_code: + error_message = output.get('err_msg') + operation = output.get('token') + LOG.error("Failed to perform %(operation)s with error code" + " %(err_code)s, error message is %(err_msg)s", + {"operation": operation, + "err_code": error_code, + "err_msg": error_message}) + except ValueError: + raise exception.UnableToProcessHyperScaleCmdOutput(cmd_out=cmd_out) + + return output + + +def check_for_setup_error(): + return True + + +def get_configuration(persona): + """Get required configuration from controller.""" + + msg_body = {'persona': persona} + configuration = None + try: + cmd_out, cmd_error = message_controller( + constants.HS_CONTROLLER_EXCH, + 'hyperscale.controller.get.configuration', + **msg_body) + LOG.debug("Response Message from Controller: %s", cmd_out) + payload = cmd_out.get('payload') + configuration = 
payload.get('config_data') + + except (exception.ErrorInSendingMsg, + exception.UnableToExecuteHyperScaleCmd, + exception.UnableToProcessHyperScaleCmdOutput): + LOG.exception("Failed to get configuration from controller") + raise exception.ErrorInFetchingConfiguration(persona=persona) + + return configuration + + +def _send_message(exchange, routing_key, message_token, **kwargs): + """Send message to specified node.""" + + cmd_out = None + cmd_err = None + processed_output = None + msg = None + try: + LOG.debug("Sending message: %s", message_token) + + # Build message from kwargs + message_body = _populate_message_body(kwargs) + cmd_arg = {} + cmd_arg["operation"] = "message" + cmd_arg["msg_body"] = message_body + cmd_arg["msg_token"] = message_token + # exchange name + cmd_arg["exchange_name"] = exchange + # routing key + cmd_arg["routing_key"] = routing_key + # create a json for cmd argument + cmdarg_json = json.dumps(cmd_arg) + + (cmd_out, cmd_err) = hsexecute(cmdarg_json) + + # cmd_err should be none in case of successful execution of cmd + if cmd_err: + LOG.debug("Sending message failed. 
Error %s", cmd_err) + raise exception.ErrorInSendingMsg(cmd_err=cmd_err) + else: + processed_output = process_cmd_out(cmd_out) + + except exception.UnableToExecuteHyperScaleCmd: + with excutils.save_and_reraise_exception(): + msg = ("Unable to execute HyperScale command for %(cmd)s" + " to exchange %(exch)s with key %(rt_key)s") + LOG.debug(msg, {"cmd": message_token, + "exch": exchange, + "rt_key": routing_key}, + exc_info=True) + + except exception.UnableToProcessHyperScaleCmdOutput: + with excutils.save_and_reraise_exception(): + msg = ("Unable to process msg %(message)s" + " to exchange %(exch)s with key %(rt_key)s") + LOG.debug(msg, {"message": message_token, + "exch": exchange, + "rt_key": routing_key}) + + return (processed_output, cmd_err) + + +def message_compute_plane(routing_key, message_token, **kwargs): + """Send message to compute plane.""" + + LOG.debug("Sending message to compute plane") + + return _send_message(constants.HS_COMPUTE_EXCH, + routing_key, + message_token, + **kwargs) + + +def message_data_plane(routing_key, message_token, **kwargs): + """Send message to data node.""" + + LOG.debug("Sending message to data plane") + + return _send_message(constants.HS_DATANODE_EXCH, + routing_key, + message_token, + **kwargs) + + +def message_controller(routing_key, message_token, **kwargs): + """Send message to controller.""" + + LOG.debug("Sending message to controller") + + return _send_message(constants.HS_CONTROLLER_EXCH, + routing_key, + message_token, + **kwargs) diff --git a/cinder/volume/drivers/veritas/vrtshyperscale.py b/cinder/volume/drivers/veritas/vrtshyperscale.py new file mode 100644 index 00000000000..4ce370a47de --- /dev/null +++ b/cinder/volume/drivers/veritas/vrtshyperscale.py @@ -0,0 +1,1001 @@ +# Copyright (c) 2017 Veritas Technologies LLC. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Cinder Driver for HyperScale +""" + +import os + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +import six + +from cinder import exception +from cinder.i18n import _ +from cinder.image import image_utils +from cinder import interface +from cinder import utils +from cinder.volume import driver +from cinder.volume.drivers.veritas import hs_constants as constants +from cinder.volume.drivers.veritas import utils as util + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) +TYPE_EPISODIC_SNAP = '0' +TYPE_USER_SNAP = '1' +TYPE_WORKFLOW_SNAP = '2' + +BLOCK_SIZE = 8 +MAX_REPLICAS = 2 +DEFAULT_REPLICAS = 1 +POOL_NAME = '{30c39970-ad80-4950-5490-8431abfaaaf0}' +HYPERSCALE_VERSION = '1.0.0' +PROVIDER_LOCATION_MNT = "/hyperscale" +PROVIDER_LOCATION = 'hyperscale-sv:' + PROVIDER_LOCATION_MNT + + +@interface.volumedriver +class HyperScaleDriver(driver.VolumeDriver): + + VERSION = '1.0' + # ThirdPartySytems wiki page + CI_WIKI_NAME = "Veritas_HyperScale_CI" + + def __init__(self, *args, **kwargs): + """Initialization""" + + super(HyperScaleDriver, self).__init__(*args, **kwargs) + + self.compute_map = {} + self.vsa_map = {} + self.compute_meta_map = {} + self.vsa_compute_map = {} + self.old_total = 0 + self.old_free = 0 + self.my_dnid = None + + @staticmethod + def _fetch_config_for_controller(): + return HyperScaleDriver._fetch_config_information( + persona='controller') + + @staticmethod + def _fetch_config_for_compute(): + return HyperScaleDriver._fetch_config_information( + persona='compute') + + 
@staticmethod + def _fetch_config_for_datanode(): + return HyperScaleDriver._fetch_config_information( + persona='datanode') + + @staticmethod + def _fetch_config_information(persona): + # Get hyperscale config information for persona + configuration = util.get_configuration(persona) + return configuration + + @utils.trace_method + def check_for_setup_error(self): + # check if HyperScale has been installed correctly + try: + version = util.get_hyperscale_version() + + if version != HYPERSCALE_VERSION: + raise exception.VolumeBackendAPIException( + data=(_("Unsupported version: %s") % version)) + except (exception.ErrorInHyperScaleVersion, + exception.UnableToExecuteHyperScaleCmd): + err_msg = _('Exception in getting HyperScale version') + LOG.exception(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + def _get_replicas(self, volume, metadata): + """Get the replicas.""" + try: + ref_targets = self._get_volume_metadata_value(metadata, + 'reflection_targets') + if ref_targets is not None: + replicas = MAX_REPLICAS + else: + replicas = DEFAULT_REPLICAS + + except Exception: + LOG.exception("Exception in getting reflection targets") + replicas = DEFAULT_REPLICAS + + LOG.debug("Number of replicas: %s", replicas) + return replicas + + @utils.trace_method + def do_setup(self, context): + """Any initialization the volume driver does while starting.""" + super(HyperScaleDriver, self).do_setup(context) + + try: + # Get computes info + computes = HyperScaleDriver._fetch_config_for_compute() + if computes is None: + computes = {} + + for compute in computes.keys(): + if 'disabled' in computes[compute].keys(): + disabled = computes[compute]['disabled'] + if disabled == "1": + continue + vsa_ip = computes[compute]['vsa_ip'] + vsa_isolated_ip = computes[compute]['vsa_isolated_ip'] + vsa_section_header = computes[compute]['vsa_section_header'] + compute_name = computes[compute]['compute_name'] + self.compute_map[vsa_ip] = vsa_isolated_ip + self.vsa_map[vsa_ip] 
= vsa_section_header + self.compute_meta_map[compute_name] = vsa_ip + self.vsa_compute_map[vsa_ip] = compute_name + + # Get controller info + cntr_info = HyperScaleDriver._fetch_config_for_controller() + if cntr_info is None: + cntr_info = {} + + # Get data node info + self.my_dnid = util.get_datanode_id() + datanodes = HyperScaleDriver._fetch_config_for_datanode() + if datanodes is None: + datanodes = {} + + for key, value in datanodes.items(): + if self.my_dnid == value['hypervisor_id']: + self.datanode_hostname = value['datanode_name'] + self.datanode_ip = value['data_ip'] + self.dn_routing_key = value['hypervisor_id'] + + LOG.debug("In init compute_map %s", self.compute_map) + LOG.debug("In init vsa_map %s", self.vsa_map) + LOG.debug("In init compute_meta_map %s", self.compute_meta_map) + + except (exception.UnableToProcessHyperScaleCmdOutput, + exception.ErrorInFetchingConfiguration): + err_msg = _("Unable to initialise the Veritas cinder driver") + LOG.exception(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + except Exception: + err_msg = _("Internal error occurred") + LOG.exception(err_msg) + raise exception.VolumeBackendAPIException(data=err_msg) + + @utils.trace_method + def create_cloned_volume(self, volume, src_vref): + """Creates a clone of the specified volume.""" + + LOG.debug("Clone volume") + model_update = {} + try: + LOG.debug("Clone new volume %(t_id)s from source volume %(s_id)s", + {"t_id": volume['id'], "s_id": src_vref['id']}) + # 1. Make a call to DN + # Check if current_dn_owner is set. 
+ + rt_key = None + # Get metadata for volume + metadata = self._get_volume_metadata(src_vref) + rt_key = self._get_volume_metadata_value(metadata, + 'current_dn_owner') + if rt_key is None: + rt_key = self.dn_routing_key + + util.message_data_plane( + rt_key, + 'hyperscale.storage.dm.volume.clone', + pool_name=POOL_NAME, + display_name=util.get_guid_with_curly_brackets( + volume['id']), + version_name=util.get_guid_with_curly_brackets( + src_vref['id']), + volume_raw_size=volume['size'], + volume_qos=1, + parent_volume_guid=util.get_guid_with_curly_brackets( + src_vref['id']), + user_id=util.get_guid_with_curly_brackets( + volume['user_id']), + project_id=util.get_guid_with_curly_brackets( + volume['project_id']), + volume_guid=util.get_guid_with_curly_brackets( + volume['id'])) + + LOG.debug("Volume cloned successfully on data node") + + # Get metadata for volume + volume_metadata = self._get_volume_metadata(volume) + parent_cur_dn = self._get_volume_metadata_value(metadata, + 'current_dn_ip') + + metadata_update = {} + metadata_update['Primary_datanode_ip'] = parent_cur_dn + metadata_update['current_dn_owner'] = rt_key + metadata_update['current_dn_ip'] = parent_cur_dn + metadata_update['source_volid'] = src_vref['id'] + metadata_update['size'] = src_vref['size'] + + # 2. Choose a potential replica here. + # The actual decision to have potential replica is made in NOVA. 
+ rt_key, rt_dn_ip = self._select_rt(volume, + volume_metadata, + only_select=True) + + if rt_key and rt_dn_ip: + metadata_update['Potential_secondary_key'] = rt_key + metadata_update['Potential_secondary_ip'] = rt_dn_ip + + except (exception.UnableToExecuteHyperScaleCmd, + exception.UnableToProcessHyperScaleCmdOutput): + with excutils.save_and_reraise_exception(): + LOG.exception('Exception in clone volume', exc_info=True) + except exception.InvalidMetadataType: + with excutils.save_and_reraise_exception(): + LOG.exception('Exception updating metadata in clone' + ' volume', exc_info=True) + + volume_metadata.update(metadata_update) + volume['provider_location'] = PROVIDER_LOCATION + model_update = {'provider_location': volume['provider_location'], + 'metadata': volume_metadata} + + return model_update + + def _get_datanodes_info(self): + # Get hyperscale datanode config information from controller + + msg_body = {} + data = None + + try: + cmd_out, cmd_error = util.message_controller( + constants.HS_CONTROLLER_EXCH, + 'hyperscale.controller.get.membership', + **msg_body) + LOG.debug("Response Message from Controller: %s", + cmd_out) + payload = cmd_out.get('payload') + data = payload.get('of_membership') + + except (exception.UnableToExecuteHyperScaleCmd, + exception.UnableToProcessHyperScaleCmdOutput): + with excutils.save_and_reraise_exception(): + LOG.exception("Failed to get datanode config " + "information from controller") + + return data + + def _select_rt(self, volume, metadata, only_select=False): + + # For the boot vdisk(first vdisk) of the instance, choose any + # reflection target other than this. For the data disks, + # retain the reflection target. + # It will be passed by the caller after reading it from instance + # metadata. 
+ + LOG.debug("_select_rt ") + rt_key = self._get_volume_metadata_value(metadata, + 'Secondary_datanode_key') + rt_dn_ip = self._get_volume_metadata_value(metadata, + 'Secondary_datanode_ip') + current_dn_ip = self._get_volume_metadata_value(metadata, + 'current_dn_ip') + + if current_dn_ip is not None and rt_dn_ip == current_dn_ip: + return None, None + + if rt_key is not None and rt_dn_ip is not None: + return rt_key, rt_dn_ip + + rt_key = 'NA' + rt_dn_ip = 'NA' + datanodes = self._get_datanodes_info() + LOG.debug("Data nodes: %s", datanodes) + + for key, value in datanodes.items(): + if value['personality'] == 'datanode': + if self.my_dnid != value['hypervisor_id']: + LOG.debug("reflection target hypervisor_id: %s", + value['hypervisor_id']) + LOG.debug("my hypervisor_id: %s", self.my_dnid) + rt_dn_ip = value['data_ip'] + rt_key = value['hypervisor_id'] + + if only_select: + return rt_key, rt_dn_ip + + return rt_key, rt_dn_ip + + def _create_replica(self, volume, metadata): + """Create vdisk on peer data node.""" + + try: + reflection_target_ip = None + rt_routing_key, reflection_target_ip = ( + self._select_rt(volume, metadata)) + LOG.debug("_create_replica %(rt_key)s %(rt_ip)s", + {"rt_key": rt_routing_key, + "rt_ip": reflection_target_ip}) + + metadata_update = {} + metadata_update['Secondary_datanode_key'] = rt_routing_key + metadata_update['Secondary_datanode_ip'] = reflection_target_ip + + if rt_routing_key is None or rt_routing_key == 'NA': + return False, None, metadata_update + + instance_id = self._get_volume_metadata_value(metadata, + 'InstanceId') + + util.message_data_plane( + rt_routing_key, + 'hyperscale.storage.dm.volume.create', + pool_name=POOL_NAME, + volume_guid=util.get_guid_with_curly_brackets( + volume['id']), + display_name=util.get_guid_with_curly_brackets( + volume['id']), + volume_raw_size=volume['size'], + vm_id=util.get_guid_with_curly_brackets( + six.text_type(instance_id)), + is_reflection_source=0, + dn_reflection_factor=1, + 
reflection_src_ip=self.datanode_ip, + user_id=util.get_guid_with_curly_brackets( + volume['user_id']), + project_id=util.get_guid_with_curly_brackets( + volume['project_id']), + volume_qos=1) + # Failure handling TBD. + ret = True + LOG.debug("Create volume sent to reflection target data node") + + except (exception.VolumeNotFound, + exception.UnableToProcessHyperScaleCmdOutput, + exception.ErrorInSendingMsg): + LOG.error("Exception in creating replica", exc_info = True) + metadata_update['Secondary_datanode_key'] = 'NA' + metadata_update['Secondary_datanode_ip'] = 'NA' + metadata_update['DN_Resiliency'] = 'degraded' + ret = False + return ret, reflection_target_ip, metadata_update + + def _get_volume_details_for_create_volume(self, + reflection_target_ip, + volume, + metadata): + + instance_id = self._get_volume_metadata_value(metadata, + 'InstanceId') + volume_details = {} + volume_details['pool_name'] = POOL_NAME + volume_details['volume_guid'] = ( + util.get_guid_with_curly_brackets(volume['id'])) + volume_details['display_name'] = ( + util.get_guid_with_curly_brackets(volume['id'])) + volume_details['volume_raw_size'] = volume['size'] + volume_details['vm_id'] = util.get_guid_with_curly_brackets( + six.text_type(instance_id)) + volume_details['user_id'] = util.get_guid_with_curly_brackets( + volume['user_id']) + volume_details['project_id'] = ( + util.get_guid_with_curly_brackets(volume['project_id'])) + volume_details['volume_qos'] = 1 + volume_details['dn_reflection_factor'] = 0 + + if reflection_target_ip is not None: + volume_details['is_reflection_source'] = 1 + volume_details['dn_reflection_factor'] = 1 + volume_details['reflection_target_ip'] = reflection_target_ip + + return volume_details + + def _get_volume_metadata(self, volume): + volume_metadata = {} + if 'volume_metadata' in volume: + for metadata in volume['volume_metadata']: + volume_metadata[metadata['key']] = metadata['value'] + return volume_metadata + + def _get_volume_metadata_value(self, 
metadata, metadata_key): + metadata_value = None + if metadata: + metadata_value = metadata.get(metadata_key) + + LOG.debug("Volume metadata key %(m_key)s, value %(m_val)s", + {"m_key": metadata_key, "m_val": metadata_value}) + return metadata_value + + @utils.trace_method + def create_volume(self, volume): + """Creates a hyperscale volume.""" + + model_update = {} + metadata_update = {} + reflection_target_ip = None + LOG.debug("Create volume") + try: + volume_metadata = self._get_volume_metadata(volume) + + # 1. Check how many replicas needs to be created. + replicas = self._get_replicas(volume, volume_metadata) + if replicas > 1: + # 2. Create replica on peer datanode. + LOG.debug("Create volume message sent to peer data node") + ret, reflection_target_ip, metadata_update = ( + self._create_replica(volume, volume_metadata)) + if ret is False: + metadata_update['DN_Resiliency'] = 'degraded' + # Do not fail volume creation, just create one replica. + reflection_target_ip = None + + # 3. Get volume details based on reflection factor + # for volume + volume_details = self._get_volume_details_for_create_volume( + reflection_target_ip, volume, volume_metadata) + + # 4. 
Send create volume to data node with volume details + util.message_data_plane( + self.dn_routing_key, + 'hyperscale.storage.dm.volume.create', + **volume_details) + LOG.debug("Create volume message sent to data node") + + volume_metadata['Primary_datanode_ip'] = self.datanode_ip + volume_metadata['current_dn_owner'] = self.dn_routing_key + volume_metadata['current_dn_ip'] = self.datanode_ip + volume_metadata['hs_image_id'] = util.get_hyperscale_image_id() + volume_metadata.update(metadata_update) + + volume['provider_location'] = PROVIDER_LOCATION + model_update = {'provider_location': volume['provider_location'], + 'metadata': volume_metadata} + + except (exception.UnableToProcessHyperScaleCmdOutput, + exception.ErrorInSendingMsg): + with excutils.save_and_reraise_exception(): + LOG.exception('Unable to create hyperscale volume') + + return model_update + + @utils.trace_method + def delete_volume(self, volume): + """Deletes a volume.""" + + LOG.debug("Delete volume with id %s", volume['id']) + # 1. Check for provider location + if not volume['provider_location']: + LOG.warning('Volume %s does not have provider_location specified', + volume['name']) + raise exception.VolumeMetadataNotFound( + volume_id=volume['id'], + metadata_key='provider_location') + + # 2. Message data plane for volume deletion + message_body = {'display_name': volume['name']} + + # if Secondary_datanode_key is present, + # delete the replica from secondary datanode. 
+ rt_key = None + + # Get metadata for volume + metadata = self._get_volume_metadata(volume) + + rt_key = self._get_volume_metadata_value(metadata, + 'Secondary_datanode_key') + rt_dn_ip = self._get_volume_metadata_value(metadata, + 'Secondary_datanode_ip') + current_dn_ip = self._get_volume_metadata_value(metadata, + 'current_dn_ip') + if current_dn_ip is not None and rt_dn_ip == current_dn_ip: + rt_key = None + + # Send Delete Volume to Data Node + try: + if rt_key is not None: + util.message_data_plane( + rt_key, + 'hyperscale.storage.dm.volume.delete', + **message_body) + + util.message_data_plane( + self.dn_routing_key, + 'hyperscale.storage.dm.volume.delete', + **message_body) + + except (exception.UnableToProcessHyperScaleCmdOutput, + exception.ErrorInSendingMsg): + LOG.error('Exception while deleting volume', exc_info=True) + raise exception.VolumeIsBusy(volume_name=volume['name']) + + @utils.trace_method + def create_snapshot(self, snapshot): + """Create a snapshot.""" + + LOG.debug("Create Snapshot %s", snapshot['volume_id']) + workflow_id = None + last_in_eds_seq = None + model_update = {} + rt_key = None + + # Get metadata for volume + snapshot_volume = snapshot.get('volume') + metadata = snapshot_volume['metadata'] + rt_key = self._get_volume_metadata_value(metadata, + 'current_dn_owner') + if rt_key is None: + rt_key = self.dn_routing_key + + # Check for episodic based on metadata key + workflow_snap = 0 + + meta = snapshot.get('metadata') + LOG.debug('Snapshot metatadata %s', meta) + if 'SNAPSHOT-COOKIE' in meta.keys(): + snapsize = meta['SIZE'] + + # Call DataNode for episodic snapshots + LOG.debug('Calling Data Node for episodic snapshots') + message_body = {} + message_body['snapshot_id'] = ( + util.get_guid_with_curly_brackets(snapshot['id'])) + message_body['volume_guid'] = ( + util.get_guid_with_curly_brackets( + snapshot['volume_id'])) + message_body['snapshot_cookie'] = meta['SNAPSHOT-COOKIE'] + + try: + # send message to data node + 
util.message_data_plane( + rt_key, + 'hyperscale.storage.dm.volume.snapshot.update', + **message_body) + + # Update size via cinder api + if snapsize is not None: + model_update['volume_size'] = snapsize.value + + # Set the episodic type metatdata for filtering purpose + meta['TYPE'] = TYPE_EPISODIC_SNAP + meta['status'] = 'available' + meta['datanode_ip'] = self.datanode_ip + + except (exception.VolumeNotFound, + exception.UnableToExecuteHyperScaleCmd, + exception.UnableToProcessHyperScaleCmdOutput): + with excutils.save_and_reraise_exception(): + LOG.exception('Exception in create snapshot') + + model_update['metadata'] = meta + return model_update + + else: + out_meta = util.episodic_snap(meta) + if out_meta.get('update'): + meta['TYPE'] = out_meta.get('TYPE') + meta['status'] = out_meta.get('status') + meta['datanode_ip'] = self.datanode_ip + model_update['metadata'] = meta + return model_update + + if 'workflow_id' in meta.keys(): + workflow_snap = 1 + workflow_id = meta['workflow_id'] + + if 'monitor_snap' in meta.keys(): + if int(meta['monitor_snap']) == constants.SNAP_RESTORE_RF: + last_in_eds_seq = 0 + else: + last_in_eds_seq = 1 + + # If code falls through here then it mean its user initiated snapshots + try: + # Get metadata for volume + vsa_routing_key = None + snapshot_volume = snapshot.get('volume') + metadata = snapshot_volume['metadata'] + LOG.debug('Calling Compute Node for user initiated snapshots') + vsa_ip = self._get_volume_metadata_value(metadata, + 'acting_vdisk_owner') + if vsa_ip is None: + vsa_ip = self._get_volume_metadata_value(metadata, 'vsa_ip') + + LOG.debug("Create snap on compute vsa %s", vsa_ip) + if vsa_ip: + vsa_routing_key = vsa_ip.replace('.', '') + + message_body = {} + # Set the parent volume id + message_body['vdisk_id_str'] = ( + util.get_guid_with_curly_brackets( + snapshot['volume_id'])) + # Set the snapshot details + message_body['snapshot_id_str'] = ( + util.get_guid_with_curly_brackets(snapshot['id'])) + 
message_body['snapshot_name'] = snapshot['name'] + + if workflow_snap == 1: + message_body['workflow_snapshot'] = 1 + else: + message_body['user_initiated'] = 1 + + if last_in_eds_seq is not None: + message_body['last_in_eds_seq'] = last_in_eds_seq + + # send message to compute node + util.message_compute_plane( + vsa_routing_key, + 'hyperscale.storage.nfs.volume.snapshot.create', + **message_body) + + # Set the snapshot type to either workflow or user initiated + # snapshot in metatdata for filtering purpose + if workflow_snap: + LOG.debug('__help request for WORKFLOW snapshot') + meta['TYPE'] = TYPE_WORKFLOW_SNAP + meta['status'] = 'creating' + meta['datanode_ip'] = self.datanode_ip + else: + LOG.debug('__help request for MANUAL snapshot') + meta['TYPE'] = TYPE_USER_SNAP + meta['status'] = 'creating' + meta['datanode_ip'] = self.datanode_ip + + if workflow_id is not None: + message_body = {} + message_body['workflow_id'] = workflow_id + message_body['skip_upto_sentinel'] = ( + 'hyperscale.vdisk.failback.snapmark_sentinel') + + # send message to controller node + util.message_controller( + constants.HS_CONTROLLER_EXCH, + 'hyperscale.controller.execute.workflow', + **message_body) + + except (exception.VolumeNotFound, + exception.UnableToExecuteHyperScaleCmd, + exception.UnableToProcessHyperScaleCmdOutput): + with excutils.save_and_reraise_exception(): + LOG.exception('Exception in create snapshot') + + model_update['metadata'] = meta + return model_update + + @utils.trace_method + def delete_snapshot(self, snapshot): + """Deletes a snapshot.""" + + meta = snapshot.get('metadata') + if 'force' in meta.keys(): + LOG.debug("Found force flag for snapshot metadata." 
+ " Not sending call to datanode ") + LOG.debug('snapshot metadata %s', meta) + return + + if 'is_busy' in meta.keys(): + LOG.warning("Snapshot %s is being used, skipping delete", + snapshot['id']) + raise exception.SnapshotIsBusy(snapshot_name=snapshot['id']) + else: + LOG.warning("Snapshot %s is being deleted," + " is_busy key not present", snapshot['id']) + + message_body = {} + message_body['volume_guid'] = ( + util.get_guid_with_curly_brackets(snapshot['volume_id'])) + message_body['snapshot_id'] = ( + util.get_guid_with_curly_brackets(snapshot['id'])) + + # HyperScale snapshots whether Episodic or User initiated, all resides + # in the data plane. + # Hence delete snapshot operation will go to datanode + rt_key = None + + # Get metadata for volume + snapshot_volume = snapshot.get('volume') + metadata = snapshot_volume['metadata'] + rt_key = self._get_volume_metadata_value(metadata, + 'current_dn_owner') + if rt_key is None: + rt_key = self.dn_routing_key + + try: + # send message to data node + util.message_data_plane( + rt_key, + 'hyperscale.storage.dm.version.delete', + **message_body) + + except (exception.UnableToExecuteHyperScaleCmd, + exception.UnableToProcessHyperScaleCmdOutput): + with excutils.save_and_reraise_exception(): + LOG.exception('Exception in delete snapshot') + + @utils.trace_method + def create_volume_from_snapshot(self, volume, snapshot): + """Create volume from snapshot.""" + + LOG.debug("Create volume from snapshot") + model_update = {} + try: + LOG.debug("Clone new volume %(t_id)s from snapshot with id" + " %(s_id)s", {"t_id": volume['id'], + "s_id": volume['snapshot_id']}) + # 1. Make a call to DN + # Check if current_dn_owner is set. 
+ # Route the snapshot creation request to current_dn_owner + + rt_key = None + + # Get metadata for volume + snap_vol = snapshot['volume'] + metadata = snap_vol['metadata'] + rt_key = self._get_volume_metadata_value(metadata, + 'current_dn_owner') + if rt_key is None: + rt_key = self.dn_routing_key + + util.message_data_plane( + rt_key, + 'hyperscale.storage.dm.volume.clone.create', + pool_name=POOL_NAME, + display_name=util.get_guid_with_curly_brackets( + volume['id']), + version_name=util.get_guid_with_curly_brackets( + volume['snapshot_id']), + volume_raw_size=volume['size'], + volume_qos=1, + parent_volume_guid=util.get_guid_with_curly_brackets( + snapshot['volume_id']), + user_id=util.get_guid_with_curly_brackets( + volume['user_id']), + project_id=util.get_guid_with_curly_brackets( + volume['project_id']), + volume_guid=util.get_guid_with_curly_brackets( + volume['id'])) + + LOG.debug("Volume created successfully on data node") + + # Get metadata for volume + volume_metadata = self._get_volume_metadata(volume) + parent_cur_dn = self._get_volume_metadata_value(metadata, + 'current_dn_ip') + + metadata_update = {} + metadata_update['snapshot_id'] = snapshot['id'] + metadata_update['parent_volume_guid'] = ( + util.get_guid_with_curly_brackets( + snapshot['volume_id'])) + metadata_update['Primary_datanode_ip'] = parent_cur_dn + metadata_update['current_dn_owner'] = rt_key + metadata_update['current_dn_ip'] = parent_cur_dn + + # 2. Choose a potential replica here. + # The actual decision to have potential replica is made in NOVA. 
+ rt_key, rt_dn_ip = self._select_rt(volume, + volume_metadata, + only_select=True) + + if rt_key and rt_dn_ip: + metadata_update['Potential_secondary_key'] = rt_key + metadata_update['Potential_secondary_ip'] = rt_dn_ip + + except (exception.UnableToExecuteHyperScaleCmd, + exception.UnableToProcessHyperScaleCmdOutput): + with excutils.save_and_reraise_exception(): + LOG.exception('Exception in creating volume from snapshot') + except exception.InvalidMetadataType: + with excutils.save_and_reraise_exception(): + LOG.exception('Exception updating metadata in create' + ' volume from snapshot') + + volume_metadata.update(metadata_update) + + volume['provider_location'] = PROVIDER_LOCATION + model_update = {'provider_location': volume['provider_location'], + 'metadata': volume_metadata} + + return model_update + + @utils.trace_method + def get_volume_stats(self, refresh=False): + """Get volume status.""" + + # If 'refresh' is True, run update the stats first. + + LOG.debug("Get volume status") + + self._stats = self._fetch_volume_status() + new_total = self._stats['total_capacity_gb'] + new_free = self._stats['free_capacity_gb'] + + if self.old_total != new_total or self.old_free != new_free: + self.old_total = new_total + self.old_free = new_free + + message_body = {'hostname': self.datanode_hostname, + 'is_admin': 1, + 'total': new_total, + 'free': new_free} + try: + cmd_out, cmd_error = util.message_controller( + constants.HS_CONTROLLER_EXCH, + 'hyperscale.controller.set.datanode.storage.stats', + **message_body) + LOG.debug("Response Message from Controller: %s", + cmd_out) + + except (exception.UnableToExecuteHyperScaleCmd, + exception.UnableToProcessHyperScaleCmdOutput): + with excutils.save_and_reraise_exception(): + LOG.exception('Exception during fetch stats') + + return self._stats + + @utils.trace_method + def extend_volume(self, volume, size_gb): + """Extend volume.""" + + LOG.debug("Extend volume") + try: + message_body = {} + message_body['volume_guid'] = 
( + util.get_guid_with_curly_brackets(volume['id'])) + message_body['new_size'] = size_gb + + # Send Extend Volume message to Data Node + util.message_data_plane( + self.dn_routing_key, + 'hyperscale.storage.dm.volume.extend', + **message_body) + + except (exception.UnableToProcessHyperScaleCmdOutput, + exception.ErrorInSendingMsg): + msg = _('Exception in extend volume %s') % volume['name'] + LOG.exception(msg) + raise exception.VolumeDriverException(message=msg) + + def _fetch_volume_status(self): + """Retrieve Volume Stats from Datanode.""" + + LOG.debug("Request Volume Stats from Datanode") + + data = {} + data["volume_backend_name"] = 'Veritas_HyperScale' + data["vendor_name"] = 'Veritas Technologies LLC' + data["driver_version"] = self.VERSION + data["storage_protocol"] = 'nfs' + data['total_capacity_gb'] = 0.0 + data['free_capacity_gb'] = 0.0 + data['reserved_percentage'] = self.configuration.reserved_percentage + data['QoS_support'] = False + + try: + message_body = {} + # send message to data node + cmd_out, cmd_error = util.message_data_plane( + self.dn_routing_key, + 'hyperscale.storage.dm.discover.stats', + **message_body) + + LOG.debug("Response Message from Datanode: %s", cmd_out) + payload = cmd_out.get('payload') + if 'stats' in payload.keys(): + if 'total_capacity' in payload.get( + 'stats')[0].keys(): + total_capacity = payload.get( + 'stats')[0]['total_capacity'] + + if 'free_capacity' in payload.get( + 'stats')[0].keys(): + free_capacity = payload.get( + 'stats')[0]['free_capacity'] + + if total_capacity is not None: + data['total_capacity_gb'] = float(total_capacity) + data['free_capacity_gb'] = float(free_capacity) + + except (exception.UnableToExecuteHyperScaleCmd, + exception.UnableToProcessHyperScaleCmdOutput): + with excutils.save_and_reraise_exception(): + LOG.exception('Exception during fetch stats') + + return data + + @utils.trace_method + def initialize_connection(self, volume, connector): + """Allow connection to connector and return 
connection info.""" + data = {'export': volume['provider_location'], + 'name': volume['name']} + return { + 'driver_volume_type': 'veritas_hyperscale', + 'data': data + } + + def terminate_connection(self, volume, connector, **kwargs): + """Disallow connection from connector.""" + pass + + def ensure_export(self, ctx, volume): + """Synchronously recreates an export for a logical volume.""" + pass + + def create_export(self, ctx, volume, connector): + + # Exports the volume. Can optionally return a Dictionary of changes + # to the volume object to be persisted.""" + pass + + def remove_export(self, ctx, volume): + """Removes an export for a logical volume.""" + pass + + @utils.trace_method + def copy_image_to_volume(self, context, volume, image_service, image_id): + """Fetch the image from image_service and write it to the volume.""" + + LOG.debug("copy_image_to_volume volume: %(vol)s " + "image service: %(service)s image id: %(id)s.", + {'vol': volume, + 'service': six.text_type(image_service), + 'id': six.text_type(image_id)}) + + path = util.get_image_path(image_id) + try: + # Skip image creation if file already exists + if not os.path.isfile(path): + image_utils.fetch_to_raw(context, + image_service, + image_id, + path, + BLOCK_SIZE, + size=volume['size']) + metadata = self._get_volume_metadata(volume) + hs_img_id = self._get_volume_metadata_value(metadata, + 'hs_image_id') + util.update_image(path, volume['id'], hs_img_id) + + except (exception.UnableToExecuteHyperScaleCmd, + exception.UnableToProcessHyperScaleCmdOutput): + with excutils.save_and_reraise_exception(): + LOG.exception('Failed to copy_image_to_volume') + + @utils.trace_method + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """Copy the volume to the specified image.""" + + LOG.debug("copy_volume_to_image volume: %(vol)s" + " image service:%(service)s image meta: %(meta)s.", + {'vol': volume, + 'service': six.text_type(image_service), + 'meta': 
six.text_type(image_meta)}) + + try: + metadata = self._get_volume_metadata(volume) + hs_img_id = self._get_volume_metadata_value(metadata, + 'hs_image_id') + path = util.get_image_path(hs_img_id, 'volume') + image_utils.upload_volume(context, + image_service, + image_meta, + path) + + except (exception.UnableToExecuteHyperScaleCmd, + exception.UnableToProcessHyperScaleCmdOutput): + with excutils.save_and_reraise_exception(): + LOG.exception('Failed to copy_volume_to_image') diff --git a/etc/cinder/rootwrap.d/volume.filters b/etc/cinder/rootwrap.d/volume.filters index 9d7acb9a2f5..47d679bf46e 100644 --- a/etc/cinder/rootwrap.d/volume.filters +++ b/etc/cinder/rootwrap.d/volume.filters @@ -10,6 +10,9 @@ tgt-admin: CommandFilter, tgt-admin, root cinder-rtstool: CommandFilter, cinder-rtstool, root scstadmin: CommandFilter, scstadmin, root +# HyperScale command to handle cinder operations +hscli: CommandFilter, hscli, root + # LVM related show commands pvs: EnvFilter, env, root, LC_ALL=C, pvs vgs: EnvFilter, env, root, LC_ALL=C, vgs diff --git a/releasenotes/notes/vrts_hyperscale_driver-5b63ab706ea8ae89.yaml b/releasenotes/notes/vrts_hyperscale_driver-5b63ab706ea8ae89.yaml new file mode 100644 index 00000000000..ebe02d3d156 --- /dev/null +++ b/releasenotes/notes/vrts_hyperscale_driver-5b63ab706ea8ae89.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added volume backend driver for Veritas HyperScale storage.