diff --git a/cinder/tests/test_vmware_datastore.py b/cinder/tests/test_vmware_datastore.py
new file mode 100644
index 00000000000..3b6409ac263
--- /dev/null
+++ b/cinder/tests/test_vmware_datastore.py
@@ -0,0 +1,385 @@
+# Copyright (c) 2014 VMware, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit tests for datastore module.
+"""
+
+import mock
+
+from cinder.openstack.common import units
+from cinder import test
+from cinder.volume.drivers.vmware import datastore as ds_sel
+from cinder.volume.drivers.vmware import error_util
+
+
+class DatastoreTest(test.TestCase):
+    """Unit tests for Datastore."""
+
+    def setUp(self):
+        super(DatastoreTest, self).setUp()
+        self._session = mock.Mock()
+        self._vops = mock.Mock()
+        self._ds_sel = ds_sel.DatastoreSelector(self._vops, self._session)
+
+    def test_get_profile_id(self):
+        profile_id = mock.sentinel.profile_id
+        self._vops.retrieve_profile_id.return_value = profile_id
+        profile_name = mock.sentinel.profile_name
+
+        self.assertEqual(profile_id,
+                         self._ds_sel.get_profile_id(profile_name))
+        self._vops.retrieve_profile_id.assert_called_once_with(profile_name)
+
+    def test_get_profile_id_with_invalid_profile(self):
+        self._vops.retrieve_profile_id.return_value = None
+        profile_name = mock.sentinel.profile_name
+
+        self.assertRaises(error_util.ProfileNotFoundException,
+                          self._ds_sel.get_profile_id,
+                          profile_name)
+        self._vops.retrieve_profile_id.assert_called_once_with(profile_name)
+
+    def _create_datastore(self, moref):
+        return mock.Mock(value=moref)
+
+    def _create_summary(
+            self, ds, free_space=units.Mi, _type=ds_sel.DatastoreType.VMFS,
+            capacity=2 * units.Mi):
+        return mock.Mock(datastore=ds, freeSpace=free_space, type=_type,
+                         capacity=capacity)
+
+    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
+                '_filter_by_profile')
+    def test_filter_datastores(self, filter_by_profile):
+        # Test with an empty datastore list.
+        datastores = []
+        size_bytes = 2 * units.Mi
+        profile_id = mock.sentinel.profile_id
+        hard_anti_affinity_datastores = None
+        hard_affinity_ds_types = None
+
+        self.assertEqual([], self._ds_sel._filter_datastores(
+            datastores, size_bytes, profile_id, hard_anti_affinity_datastores,
+            hard_affinity_ds_types))
+
+        # Test with a single datastore which is in the hard anti-affinity
+        # list.
+        ds_1 = self._create_datastore('ds-1')
+        datastores = [ds_1]
+        hard_anti_affinity_datastores = [ds_1.value]
+
+        self.assertEqual([], self._ds_sel._filter_datastores(
+            datastores, size_bytes, profile_id, hard_anti_affinity_datastores,
+            hard_affinity_ds_types))
+
+        # Extend the previous case with a datastore that is not compliant
+        # with the profile.
+        ds_2 = self._create_datastore('ds-2')
+        datastores.append(ds_2)
+        filter_by_profile.return_value = []
+
+        self.assertEqual([], self._ds_sel._filter_datastores(
+            datastores, size_bytes, profile_id, hard_anti_affinity_datastores,
+            hard_affinity_ds_types))
+        filter_by_profile.assert_called_once_with([ds_2], profile_id)
+
+        # Extend the previous case with a datastore that has too little free
+        # space.
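+        # (ds_3 below reports only 1 MiB free against the 2 MiB request, so
+        # the free-space check drops it even though it matches the profile.)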
+        ds_3 = self._create_datastore('ds-3')
+        datastores.append(ds_3)
+        filter_by_profile.return_value = [ds_3]
+
+        free_space_list = [units.Mi]
+        type_list = [ds_sel.DatastoreType.NFS]
+        self._vops.get_summary.side_effect = (
+            lambda ds: self._create_summary(ds,
+                                            free_space_list.pop(0),
+                                            type_list.pop(0)))
+
+        self.assertEqual([], self._ds_sel._filter_datastores(
+            datastores, size_bytes, profile_id, hard_anti_affinity_datastores,
+            hard_affinity_ds_types))
+
+        # Extend the previous case with a datastore that does not satisfy the
+        # hard affinity datastore type requirement.
+        ds_4 = self._create_datastore('ds-4')
+        datastores.append(ds_4)
+        filter_by_profile.return_value = [ds_3, ds_4]
+
+        free_space_list = [units.Mi, 4 * units.Mi]
+        type_list = [ds_sel.DatastoreType.NFS, ds_sel.DatastoreType.VSAN]
+        hard_affinity_ds_types = [ds_sel.DatastoreType.NFS]
+
+        self.assertEqual([], self._ds_sel._filter_datastores(
+            datastores, size_bytes, profile_id, hard_anti_affinity_datastores,
+            hard_affinity_ds_types))
+
+        # Modify the previous case to remove the hard affinity datastore type
+        # requirement.
+        free_space_list = [units.Mi, 4 * units.Mi]
+        type_list = [ds_sel.DatastoreType.NFS, ds_sel.DatastoreType.VSAN]
+        hard_affinity_ds_types = None
+
+        res = self._ds_sel._filter_datastores(
+            datastores, size_bytes, profile_id, hard_anti_affinity_datastores,
+            hard_affinity_ds_types)
+        self.assertEqual(1, len(res))
+        self.assertEqual(ds_4, res[0].datastore)
+
+        # Extend the previous case by adding a datastore satisfying the hard
+        # affinity datastore type requirement.
+        ds_5 = self._create_datastore('ds-5')
+        datastores.append(ds_5)
+        filter_by_profile.return_value = [ds_3, ds_4, ds_5]
+
+        free_space_list = [units.Mi, 4 * units.Mi, 5 * units.Mi]
+        type_list = [ds_sel.DatastoreType.NFS, ds_sel.DatastoreType.VSAN,
+                     ds_sel.DatastoreType.VMFS]
+        hard_affinity_ds_types = [ds_sel.DatastoreType.VMFS]
+
+        res = self._ds_sel._filter_datastores(
+            datastores, size_bytes, profile_id, hard_anti_affinity_datastores,
+            hard_affinity_ds_types)
+        self.assertEqual(1, len(res))
+        self.assertEqual(ds_5, res[0].datastore)
+
+        # Modify the previous case to have two datastores satisfying the hard
+        # affinity datastore type requirement.
+        free_space_list = [units.Mi, 4 * units.Mi, 5 * units.Mi]
+        type_list = [ds_sel.DatastoreType.NFS, ds_sel.DatastoreType.VSAN,
+                     ds_sel.DatastoreType.VSAN]
+        hard_affinity_ds_types = [ds_sel.DatastoreType.VSAN]
+
+        res = self._ds_sel._filter_datastores(
+            datastores, size_bytes, profile_id, hard_anti_affinity_datastores,
+            hard_affinity_ds_types)
+        self.assertEqual(2, len(res))
+        self.assertEqual(ds_4, res[0].datastore)
+        self.assertEqual(ds_5, res[1].datastore)
+
+        # Clear side effects.
+        self._vops.get_summary.side_effect = None
+
+    def test_select_best_summary(self):
+        # No tie: all datastores have different host mount counts.
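+        # (The selector prefers the datastore mounted on the most hosts; the
+        # connected host counts below are 1, 2 and 3, so ds_3 should win even
+        # though it has the highest space utilization.)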
+        summary_1 = self._create_summary(mock.sentinel.ds_1,
+                                         free_space=units.Mi,
+                                         capacity=2 * units.Mi)
+        summary_2 = self._create_summary(mock.sentinel.ds_2,
+                                         free_space=units.Mi,
+                                         capacity=3 * units.Mi)
+        summary_3 = self._create_summary(mock.sentinel.ds_3,
+                                         free_space=units.Mi,
+                                         capacity=4 * units.Mi)
+
+        host_1 = mock.sentinel.host_1
+        host_2 = mock.sentinel.host_2
+        host_3 = mock.sentinel.host_3
+
+        connected_hosts = {mock.sentinel.ds_1: [host_1],
+                           mock.sentinel.ds_2: [host_1, host_2],
+                           mock.sentinel.ds_3: [host_1, host_2, host_3]}
+        self._vops.get_connected_hosts.side_effect = (
+            lambda datastore: connected_hosts[datastore])
+
+        summaries = [summary_1, summary_2, summary_3]
+        (best_summary, best_utilization) = self._ds_sel._select_best_summary(
+            summaries)
+
+        self.assertEqual(summary_3, best_summary)
+        self.assertEqual(3 / 4.0, best_utilization)
+
+        # Tie: two datastores with the maximum host mount count.
+        summary_4 = self._create_summary(mock.sentinel.ds_4,
+                                         free_space=2 * units.Mi,
+                                         capacity=4 * units.Mi)
+        connected_hosts[mock.sentinel.ds_4] = (
+            connected_hosts[mock.sentinel.ds_3])
+        summaries.append(summary_4)
+        (best_summary, best_utilization) = self._ds_sel._select_best_summary(
+            summaries)
+
+        self.assertEqual(summary_4, best_summary)
+        self.assertEqual(1 / 2.0, best_utilization)
+
+        # Clear side effects.
+        self._vops.get_connected_hosts.side_effect = None
+
+    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
+                'get_profile_id')
+    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
+                '_filter_datastores')
+    def test_select_datastore(self, filter_datastores, get_profile_id):
+        # Test with no hosts.
+        size_bytes = units.Ki
+        req = {self._ds_sel.SIZE_BYTES: size_bytes}
+        self._vops.get_hosts.return_value = mock.Mock(objects=[])
+
+        self.assertEqual((), self._ds_sel.select_datastore(req))
+        self._vops.get_hosts.assert_called_once_with()
+
+        # Test with a single host that has no valid datastores.
+        host_1 = mock.sentinel.host_1
+        self._vops.get_hosts.return_value = mock.Mock(
+            objects=[mock.Mock(obj=host_1)])
+        self._vops.continue_retrieval.return_value = None
+        self._vops.get_dss_rp.side_effect = error_util.VimException('error')
+
+        self.assertEqual((), self._ds_sel.select_datastore(req))
+        self._vops.get_dss_rp.assert_called_once_with(host_1)
+
+        # Test with three hosts and a vCenter connection problem while
+        # fetching datastores for the second host.
+        self._vops.get_dss_rp.reset_mock()
+        host_2 = mock.sentinel.host_2
+        host_3 = mock.sentinel.host_3
+        self._vops.get_hosts.return_value = mock.Mock(
+            objects=[mock.Mock(obj=host_1),
+                     mock.Mock(obj=host_2),
+                     mock.Mock(obj=host_3)])
+        self._vops.get_dss_rp.side_effect = [
+            error_util.VimException('no valid datastores'),
+            error_util.VimConnectionException('connection error')]
+
+        self.assertRaises(error_util.VimConnectionException,
+                          self._ds_sel.select_datastore,
+                          req)
+        get_dss_rp_exp_calls = [mock.call(host_1), mock.call(host_2)]
+        self.assertEqual(get_dss_rp_exp_calls,
+                         self._vops.get_dss_rp.call_args_list)
+
+        # Modify the previous case to return datastores for the second and
+        # third hosts, none of which meet the requirements, which include a
+        # storage profile and affinity constraints.
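+        # (The requirements are passed as a dict keyed by DatastoreSelector
+        # constants: hard affinity to VMFS datastores, hard anti-affinity to
+        # ds_1a and a storage profile, set up below.)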
+        aff_ds_types = [ds_sel.DatastoreType.VMFS]
+        req[ds_sel.DatastoreSelector.HARD_AFFINITY_DS_TYPE] = aff_ds_types
+
+        ds_1a = mock.sentinel.ds_1a
+        anti_affinity_ds = [ds_1a]
+        req[ds_sel.DatastoreSelector.HARD_ANTI_AFFINITY_DS] = anti_affinity_ds
+
+        profile_name = mock.sentinel.profile_name
+        req[ds_sel.DatastoreSelector.PROFILE_NAME] = profile_name
+
+        profile_id = mock.sentinel.profile_id
+        get_profile_id.return_value = profile_id
+
+        ds_2a = mock.sentinel.ds_2a
+        ds_2b = mock.sentinel.ds_2b
+        ds_3a = mock.sentinel.ds_3a
+
+        self._vops.get_dss_rp.reset_mock()
+        rp_2 = mock.sentinel.rp_2
+        rp_3 = mock.sentinel.rp_3
+        self._vops.get_dss_rp.side_effect = [
+            error_util.VimException('no valid datastores'),
+            ([ds_2a, ds_2b], rp_2),
+            ([ds_3a], rp_3)]
+
+        filter_datastores.return_value = []
+
+        self.assertEqual((), self._ds_sel.select_datastore(req))
+        get_profile_id.assert_called_once_with(profile_name)
+        get_dss_rp_exp_calls.append(mock.call(host_3))
+        self.assertEqual(get_dss_rp_exp_calls,
+                         self._vops.get_dss_rp.call_args_list)
+        filter_datastores_exp_calls = [
+            mock.call([ds_2a, ds_2b], size_bytes, profile_id, anti_affinity_ds,
+                      aff_ds_types),
+            mock.call([ds_3a], size_bytes, profile_id, anti_affinity_ds,
+                      aff_ds_types)]
+        self.assertEqual(filter_datastores_exp_calls,
+                         filter_datastores.call_args_list)
+
+        # Modify the previous case to have a non-empty summary list after
+        # filtering, with the preferred utilization threshold unset.
+        self._vops.get_dss_rp.side_effect = [
+            error_util.VimException('no valid datastores'),
+            ([ds_2a, ds_2b], rp_2),
+            ([ds_3a], rp_3)]
+
+        summary_2b = self._create_summary(ds_2b, free_space=0.5 * units.Mi,
+                                          capacity=units.Mi)
+        filter_datastores.side_effect = [[summary_2b]]
+        self._vops.get_connected_hosts.return_value = [host_1]
+
+        self.assertEqual((host_2, rp_2, summary_2b),
+                         self._ds_sel.select_datastore(req))
+
+        # Modify the previous case to have the preferred utilization threshold
+        # satisfied by one datastore.
+        self._vops.get_dss_rp.side_effect = [
+            error_util.VimException('no valid datastores'),
+            ([ds_2a, ds_2b], rp_2),
+            ([ds_3a], rp_3)]
+
+        req[ds_sel.DatastoreSelector.PREF_UTIL_THRESH] = 0.4
+        summary_3a = self._create_summary(ds_3a, free_space=0.7 * units.Mi,
+                                          capacity=units.Mi)
+        filter_datastores.side_effect = [[summary_2b], [summary_3a]]
+
+        self.assertEqual((host_3, rp_3, summary_3a),
+                         self._ds_sel.select_datastore(req))
+
+        # Modify the previous case to have a preferred utilization threshold
+        # which cannot be satisfied.
+        self._vops.get_dss_rp.side_effect = [
+            error_util.VimException('no valid datastores'),
+            ([ds_2a, ds_2b], rp_2),
+            ([ds_3a], rp_3)]
+        filter_datastores.side_effect = [[summary_2b], [summary_3a]]
+
+        req[ds_sel.DatastoreSelector.PREF_UTIL_THRESH] = 0.2
+        summary_2b.freeSpace = 0.75 * units.Mi
+
+        self.assertEqual((host_2, rp_2, summary_2b),
+                         self._ds_sel.select_datastore(req))
+
+        # Clear side effects.
+        self._vops.get_dss_rp.side_effect = None
+
+    @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.'
+                '_filter_by_profile')
+    def test_is_datastore_compliant(self, filter_by_profile):
+        # Test with empty (None) profile.
+        profile_name = None
+        datastore = mock.sentinel.datastore
+        self.assertTrue(self._ds_sel.is_datastore_compliant(datastore,
+                                                            profile_name))
+
+        # Test with invalid profile.
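+        # (A profile is "invalid" when retrieve_profile_id cannot resolve the
+        # name to a vCenter profile ID and returns None.)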
+ profile_name = mock.sentinel.profile_name + self._vops.retrieve_profile_id.return_value = None + self.assertRaises(error_util.ProfileNotFoundException, + self._ds_sel.is_datastore_compliant, + datastore, + profile_name) + self._vops.retrieve_profile_id.assert_called_once_with(profile_name) + + # Test with valid profile and non-compliant datastore. + self._vops.retrieve_profile_id.reset_mock() + profile_id = mock.sentinel.profile_id + self._vops.retrieve_profile_id.return_value = profile_id + filter_by_profile.return_value = [] + self.assertFalse(self._ds_sel.is_datastore_compliant(datastore, + profile_name)) + self._vops.retrieve_profile_id.assert_called_once_with(profile_name) + filter_by_profile.assert_called_once_with([datastore], profile_id) + + # Test with valid profile and compliant datastore. + self._vops.retrieve_profile_id.reset_mock() + filter_by_profile.reset_mock() + filter_by_profile.return_value = [datastore] + self.assertTrue(self._ds_sel.is_datastore_compliant(datastore, + profile_name)) + self._vops.retrieve_profile_id.assert_called_once_with(profile_name) + filter_by_profile.assert_called_once_with([datastore], profile_id) diff --git a/cinder/volume/drivers/vmware/datastore.py b/cinder/volume/drivers/vmware/datastore.py new file mode 100644 index 00000000000..a44baefa7fd --- /dev/null +++ b/cinder/volume/drivers/vmware/datastore.py @@ -0,0 +1,270 @@ +# Copyright (c) 2014 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Classes and utility methods for datastore selection. +""" + +from cinder.openstack.common import excutils +from cinder.openstack.common.gettextutils import _ +from cinder.openstack.common import log as logging +from cinder.volume.drivers.vmware import error_util +from cinder.volume.drivers.vmware import vim_util + + +LOG = logging.getLogger(__name__) + + +class DatastoreType(object): + """Supported datastore types.""" + + NFS = "nfs" + VMFS = "vmfs" + VSAN = "vsan" + + +class DatastoreSelector(object): + """Class for selecting datastores which satisfy input requirements.""" + + HARD_AFFINITY_DS_TYPE = "hardAffinityDatastoreTypes" + HARD_ANTI_AFFINITY_DS = "hardAntiAffinityDatastores" + PREF_UTIL_THRESH = "preferredUtilizationThreshold" + SIZE_BYTES = "sizeBytes" + PROFILE_NAME = "storageProfileName" + + # TODO(vbala) Remove dependency on volumeops and vim_util. + def __init__(self, vops, session): + self._vops = vops + self._session = session + + def get_profile_id(self, profile_name): + """Get vCenter profile ID for the given profile name. 
+
+        :param profile_name: name of the storage profile
+        :return: vCenter profile ID
+        :raises: ProfileNotFoundException
+        """
+        profile_id = self._vops.retrieve_profile_id(profile_name)
+        if profile_id is None:
+            LOG.error(_("Storage profile: %s cannot be found in vCenter."),
+                      profile_name)
+            raise error_util.ProfileNotFoundException(
+                storage_profile=profile_name)
+        LOG.debug("Storage profile: %(name)s resolved to vCenter profile ID: "
+                  "%(id)s.",
+                  {'name': profile_name,
+                   'id': profile_id})
+        return profile_id
+
+    def _filter_by_profile(self, datastores, profile_id):
+        """Filter out input datastores that do not match the given profile."""
+        cf = self._session.pbm.client.factory
+        hubs = vim_util.convert_datastores_to_hubs(cf, datastores)
+        filtered_hubs = self._vops.filter_matching_hubs(hubs, profile_id)
+        return vim_util.convert_hubs_to_datastores(filtered_hubs, datastores)
+
+    def _filter_datastores(self, datastores, size_bytes, profile_id,
+                           hard_anti_affinity_datastores,
+                           hard_affinity_ds_types):
+        """Filter datastores based on profile, size and affinity."""
+        LOG.debug(
+            "Filtering datastores: %(datastores)s based on size (bytes): "
+            "%(size)d, profile: %(profile)s, hard-anti-affinity-datastores: "
+            "%(hard_anti_affinity_datastores)s, hard-affinity-datastore-types:"
+            " %(hard_affinity_ds_types)s.",
+            {'datastores': datastores,
+             'size': size_bytes,
+             'profile': profile_id,
+             'hard_anti_affinity_datastores': hard_anti_affinity_datastores,
+             'hard_affinity_ds_types': hard_affinity_ds_types})
+        if hard_anti_affinity_datastores is None:
+            hard_anti_affinity_datastores = []
+        filtered_datastores = [ds for ds in datastores if ds.value not in
+                               hard_anti_affinity_datastores]
+
+        if filtered_datastores and profile_id is not None:
+            filtered_datastores = self._filter_by_profile(
+                filtered_datastores, profile_id)
+            LOG.debug("Profile: %(id)s matched by datastores: "
+                      "%(datastores)s.",
+                      {'datastores': filtered_datastores,
+                       'id': profile_id})
+
+        filtered_summaries = [self._vops.get_summary(ds) for ds in
+                              filtered_datastores]
+
+        def _filter(summary):
+            return (summary.freeSpace > size_bytes and
+                    (hard_affinity_ds_types is None or
+                     summary.type.lower() in hard_affinity_ds_types))
+
+        return filter(_filter, filtered_summaries)
+
+    def _get_all_hosts(self):
+        """Get all ESX hosts managed by vCenter."""
+        all_hosts = []
+
+        retrieve_result = self._vops.get_hosts()
+        while retrieve_result:
+            hosts = retrieve_result.objects
+            if not hosts:
+                break
+
+            all_hosts.extend(hosts)
+            retrieve_result = self._vops.continue_retrieval(
+                retrieve_result)
+        return all_hosts
+
+    def _compute_space_utilization(self, datastore_summary):
+        """Compute space utilization of the given datastore."""
+        return (
+            1.0 -
+            datastore_summary.freeSpace / float(datastore_summary.capacity)
+        )
+
+    def _select_best_summary(self, summaries):
+        """Selects the best datastore summary.
+
+        Selects the datastore which is connected to the maximum number of
+        hosts. Ties are broken based on space utilization; the datastore
+        with lower space utilization is preferred.
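+
+        For example (numbers are illustrative), a summary with capacity
+        4 MiB and 1 MiB of free space has space utilization
+        1.0 - 1/4 = 0.75, so it loses a tie against a summary with
+        utilization 0.5.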
+        """
+        best_summary = None
+        max_host_count = 0
+        best_space_utilization = 1.0
+
+        for summary in summaries:
+            host_count = len(self._vops.get_connected_hosts(
+                summary.datastore))
+            if host_count > max_host_count:
+                max_host_count = host_count
+                best_space_utilization = self._compute_space_utilization(
+                    summary
+                )
+                best_summary = summary
+            elif host_count == max_host_count:
+                # Break the tie based on space utilization.
+                space_utilization = self._compute_space_utilization(
+                    summary
+                )
+                if space_utilization < best_space_utilization:
+                    best_space_utilization = space_utilization
+                    best_summary = summary
+
+        LOG.debug("Datastore: %(datastore)s is connected to %(host_count)d "
+                  "host(s) and has space utilization: %(utilization)s.",
+                  {'datastore': best_summary.datastore,
+                   'host_count': max_host_count,
+                   'utilization': best_space_utilization})
+        return (best_summary, best_space_utilization)
+
+    def select_datastore(self, req, hosts=None):
+        """Selects a datastore satisfying the given requirements.
+
+        Returns the selected datastore summary along with a compute host and
+        resource pool where a VM can be created.
+
+        :param req: selection requirements
+        :param hosts: list of hosts to consider
+        :return: (host, resourcePool, summary)
+        """
+        best_candidate = ()
+        best_utilization = 1.0
+
+        hard_affinity_ds_types = req.get(
+            DatastoreSelector.HARD_AFFINITY_DS_TYPE)
+        hard_anti_affinity_datastores = req.get(
+            DatastoreSelector.HARD_ANTI_AFFINITY_DS)
+        pref_utilization_thresh = req.get(DatastoreSelector.PREF_UTIL_THRESH,
+                                          -1)
+        size_bytes = req[DatastoreSelector.SIZE_BYTES]
+        profile_name = req.get(DatastoreSelector.PROFILE_NAME)
+
+        profile_id = None
+        if profile_name is not None:
+            profile_id = self.get_profile_id(profile_name)
+
+        if hosts is None:
+            hosts = self._get_all_hosts()
+
+        LOG.debug("Using hosts: %(hosts)s for datastore selection based on "
+                  "requirements: %(req)s.",
+                  {'hosts': hosts,
+                   'req': req})
+        for host in hosts:
+            host_ref = host.obj
+            try:
+                (datastores, rp) = self._vops.get_dss_rp(host_ref)
+            except error_util.VimConnectionException:
+                # No need to try other hosts when there is a connection
+                # problem with vCenter.
+                with excutils.save_and_reraise_exception():
+                    LOG.exception(_("Error occurred while selecting "
+                                    "datastore."))
+            except error_util.VimException:
+                # TODO(vbala) volumeops.get_dss_rp shouldn't throw
+                # VimException for an empty datastore list.
+                LOG.warn(_("Unable to fetch datastores connected to host %s."),
+                         host_ref,
+                         exc_info=True)
+                continue
+
+            if not datastores:
+                continue
+
+            filtered_summaries = self._filter_datastores(
+                datastores, size_bytes, profile_id,
+                hard_anti_affinity_datastores, hard_affinity_ds_types)
+            LOG.debug("Datastores remaining after filtering: %s.",
+                      filtered_summaries)
+
+            if not filtered_summaries:
+                continue
+
+            (summary, utilization) = self._select_best_summary(
+                filtered_summaries)
+            if (pref_utilization_thresh == -1 or
+                    utilization <= pref_utilization_thresh):
+                return (host_ref, rp, summary)
+
+            if utilization < best_utilization:
+                best_candidate = (host_ref, rp, summary)
+                best_utilization = utilization
+
+        LOG.debug("Best candidate: %s.", best_candidate)
+        return best_candidate
+
+    def is_datastore_compliant(self, datastore, profile_name):
+        """Check if the datastore is compliant with the given profile.
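+
+        A datastore is considered compliant if the PBM profile matching
+        filter (_filter_by_profile) returns it for the profile ID resolved
+        from the given profile name.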
+
+        :param datastore: datastore whose compliance needs to be checked
+        :param profile_name: profile to check the compliance against
+        :return: True if the datastore is compliant; False otherwise
+        :raises: ProfileNotFoundException
+        """
+        LOG.debug("Checking datastore: %(datastore)s compliance against "
+                  "profile: %(profile)s.",
+                  {'datastore': datastore,
+                   'profile': profile_name})
+        if profile_name is None:
+            # Any datastore is trivially compliant with a None profile.
+            return True
+
+        profile_id = self.get_profile_id(profile_name)
+        is_compliant = bool(self._filter_by_profile([datastore], profile_id))
+        LOG.debug("Compliance is %(is_compliant)s for datastore: "
+                  "%(datastore)s against profile: %(profile)s.",
+                  {'is_compliant': is_compliant,
+                   'datastore': datastore,
+                   'profile': profile_name})
+        return is_compliant
diff --git a/cinder/volume/drivers/vmware/error_util.py b/cinder/volume/drivers/vmware/error_util.py
index ddd9a9ce165..5696bc700bd 100644
--- a/cinder/volume/drivers/vmware/error_util.py
+++ b/cinder/volume/drivers/vmware/error_util.py
@@ -88,3 +88,8 @@ class ImageTransferException(VMwareDriverException):
 class VirtualDiskNotFoundException(VMwareDriverException):
     """Thrown when virtual disk is not found."""
     message = _("There is no virtual disk device.")
+
+
+class ProfileNotFoundException(VMwareDriverException):
+    """Thrown when the given storage profile cannot be found."""
+    message = _("Storage profile: %(storage_profile)s not found.")
diff --git a/cinder/volume/drivers/vmware/volumeops.py b/cinder/volume/drivers/vmware/volumeops.py
index dc54eccb7d7..ce3b92632e7 100644
--- a/cinder/volume/drivers/vmware/volumeops.py
+++ b/cinder/volume/drivers/vmware/volumeops.py
@@ -1286,13 +1286,17 @@ class VMwareVolumeOps(object):
         profile_manager = pbm.service_content.profileManager
         res_type = pbm.client.factory.create('ns0:PbmProfileResourceType')
         res_type.resourceType = 'STORAGE'
+        profiles = []
         profileIds = self._session.invoke_api(pbm, 'PbmQueryProfile',
                                               profile_manager,
                                               resourceType=res_type)
         LOG.debug("Got profile IDs: %s", profileIds)
-        return self._session.invoke_api(pbm, 'PbmRetrieveContent',
-                                        profile_manager,
-                                        profileIds=profileIds)
+
+        if profileIds:
+            profiles = self._session.invoke_api(pbm, 'PbmRetrieveContent',
+                                                profile_manager,
+                                                profileIds=profileIds)
+        return profiles
 
     def retrieve_profile_id(self, profile_name):
         """Get the profile uuid from current VC for given profile name.
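Usage note: the sketch below (not part of the patch) shows how a caller might drive the new selector. `my_vops` and `my_session` are hypothetical stand-ins for the driver's VMwareVolumeOps instance and its PBM-enabled session; the size, affinity and threshold values are illustrative.

```python
# Minimal sketch, assuming my_vops and my_session already exist (the real
# driver wires these up itself; the names here are placeholders).
from cinder.openstack.common import units
from cinder.volume.drivers.vmware import datastore as ds_sel

selector = ds_sel.DatastoreSelector(my_vops, my_session)

req = {
    # Mandatory: the space the volume needs, in bytes.
    ds_sel.DatastoreSelector.SIZE_BYTES: units.Gi,
    # Optional: only consider VMFS datastores.
    ds_sel.DatastoreSelector.HARD_AFFINITY_DS_TYPE: [
        ds_sel.DatastoreType.VMFS],
    # Optional: prefer datastores that are at most 50% full; fuller ones are
    # kept only as fallback candidates.
    ds_sel.DatastoreSelector.PREF_UTIL_THRESH: 0.5,
}

result = selector.select_datastore(req)
if not result:
    # An empty tuple means no datastore satisfied the hard requirements.
    raise RuntimeError("no suitable datastore found")
(host_ref, resource_pool, summary) = result
```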