From 294425d347bdbd8e6df1737e320909fa8a15f8fb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Dulko?=
Date: Tue, 17 Nov 2015 21:31:12 +0100
Subject: [PATCH] Switch request_spec in create volume calls to ovo

To be able to make changes to the request_spec passed in create_volume
calls (in the c-sch and c-vol RPC APIs) while supporting rolling
upgrades, we need to convert it from an arbitrary dict blob to a
well-defined versioned object.

This commit does so while maintaining backward compatibility with
Mitaka: when the RPC version is pinned to Mitaka we serialize the
object to a dict before sending, and when we receive a dict we convert
it to the object.

We currently send a lot of duplicated and unused data through
request_spec and filter_properties, and we should start to clean that
up. Future patches should:

* Convert filter_properties to ovo.
* Make services use only one occurrence of the data.
* Remove the unused duplicates while maintaining backward
  compatibility through object translations.

Change-Id: I7dae83667a1aca92c552fbfaa1e90c6558e293bf
Partial-Implements: blueprint cinder-objects
---
 cinder/objects/__init__.py                    |   1 +
 cinder/objects/base.py                        |   1 +
 cinder/objects/request_spec.py                | 126 ++++++++++++++++++
 cinder/objects/volume_type.py                 |   2 +-
 cinder/scheduler/filter_scheduler.py          |  17 +--
 cinder/scheduler/manager.py                   |   5 +
 cinder/scheduler/rpcapi.py                    |   9 +-
 cinder/tests/unit/objects/test_objects.py     |   4 +-
 .../unit/scheduler/test_filter_scheduler.py   |  43 ++++--
 cinder/tests/unit/scheduler/test_rpcapi.py    |  19 ++-
 cinder/tests/unit/scheduler/test_scheduler.py |  25 ++--
 cinder/tests/unit/test_volume.py              |  10 +-
 cinder/tests/unit/test_volume_rpcapi.py       |  11 +-
 cinder/volume/flows/api/create_volume.py      |  22 ++-
 cinder/volume/manager.py                      |   7 +-
 cinder/volume/rpcapi.py                       |  22 ++-
 tools/lintstack.py                            |   2 +
 17 files changed, 273 insertions(+), 53 deletions(-)
 create mode 100644 cinder/objects/request_spec.py

diff --git a/cinder/objects/__init__.py b/cinder/objects/__init__.py
index fa147208f0a..5d598556577 100644
--- a/cinder/objects/__init__.py
+++ b/cinder/objects/__init__.py
@@ -29,6 +29,7 @@ def register_all():
     __import__('cinder.objects.cluster')
     __import__('cinder.objects.consistencygroup')
     __import__('cinder.objects.qos_specs')
+    __import__('cinder.objects.request_spec')
     __import__('cinder.objects.service')
     __import__('cinder.objects.snapshot')
     __import__('cinder.objects.volume')
diff --git a/cinder/objects/base.py b/cinder/objects/base.py
index 590de3e1575..56d49b62da9 100644
--- a/cinder/objects/base.py
+++ b/cinder/objects/base.py
@@ -106,6 +106,7 @@ OBJ_VERSIONS.add('1.6', {'QualityOfServiceSpecs': '1.0',
 OBJ_VERSIONS.add('1.7', {'Cluster': '1.0', 'ClusterList': '1.0',
                          'Service': '1.4', 'Volume': '1.4',
                          'ConsistencyGroup': '1.3'})
+OBJ_VERSIONS.add('1.8', {'RequestSpec': '1.0', 'VolumeProperties': '1.0'})


 class CinderObjectRegistry(base.VersionedObjectRegistry):
diff --git a/cinder/objects/request_spec.py b/cinder/objects/request_spec.py
new file mode 100644
index 00000000000..dc9f62d9e16
--- /dev/null
+++ b/cinder/objects/request_spec.py
@@ -0,0 +1,126 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_versionedobjects import fields
+
+from cinder import objects
+from cinder.objects import base
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+@base.CinderObjectRegistry.register
+class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
+                  base.CinderComparableObject):
+    # Version 1.0: Initial version
+    VERSION = '1.0'
+
+    fields = {
+        'consistencygroup_id': fields.UUIDField(nullable=True),
+        'cgsnapshot_id': fields.UUIDField(nullable=True),
+        'image_id': fields.UUIDField(nullable=True),
+        'snapshot_id': fields.UUIDField(nullable=True),
+        'source_replicaid': fields.UUIDField(nullable=True),
+        'source_volid': fields.UUIDField(nullable=True),
+        'volume_id': fields.UUIDField(nullable=True),
+        'volume': fields.ObjectField('Volume', nullable=True),
+        'volume_type': fields.ObjectField('VolumeType', nullable=True),
+        'volume_properties': fields.ObjectField('VolumeProperties',
+                                                nullable=True),
+        'CG_backend': fields.StringField(nullable=True),
+    }
+
+    obj_extra_fields = ['resource_properties']
+
+    @property
+    def resource_properties(self):
+        # TODO(dulek): This is to maintain compatibility with filters from
+        # oslo-incubator. As we've moved them into our codebase, we should
+        # adapt them to use volume_properties and remove this shim.
+        return self.volume_properties
+
+    @classmethod
+    def from_primitives(cls, spec):
+        """Return a RequestSpec object created from a legacy dictionary.
+
+        FIXME(dulek): This should go away in early O as we stop supporting
+        backward compatibility with M.
+        """
+        spec = spec.copy()
+        spec_obj = cls()
+
+        vol_props = spec.pop('volume_properties', {})
+        if vol_props is not None:
+            vol_props = VolumeProperties(**vol_props)
+        spec_obj.volume_properties = vol_props
+
+        if 'volume' in spec:
+            vol = spec.pop('volume', {})
+            vol.pop('name', None)
+            if vol is not None:
+                vol = objects.Volume(**vol)
+            spec_obj.volume = vol
+
+        if 'volume_type' in spec:
+            vol_type = spec.pop('volume_type', {})
+            if vol_type is not None:
+                vol_type = objects.VolumeType(**vol_type)
+            spec_obj.volume_type = vol_type
+
+        spec.pop('resource_properties', None)
+
+        for k, v in spec.items():
+            setattr(spec_obj, k, v)
+
+        return spec_obj
+
+
+@base.CinderObjectRegistry.register
+class VolumeProperties(base.CinderObject, base.CinderObjectDictCompat):
+    # Version 1.0: Initial version
+    VERSION = '1.0'
+
+    # TODO(dulek): We need this class to initially move volume_properties to
+    # ovo, but it should be removed as soon as possible. Most of the data
+    # here is already in request_spec and its volume. The outstanding ones
+    # are reservations and qos_specs. The first may be moved to request_spec
+    # and the second added as a relationship in the volume_type field; then
+    # the whole volume_properties (and resource_properties) in request_spec
+    # won't be needed. 
+ + fields = { + 'attach_status': fields.StringField(nullable=True), + 'availability_zone': fields.StringField(nullable=True), + 'cgsnapshot_id': fields.UUIDField(nullable=True), + 'consistencygroup_id': fields.UUIDField(nullable=True), + 'display_description': fields.StringField(nullable=True), + 'display_name': fields.StringField(nullable=True), + 'encryption_key_id': fields.UUIDField(nullable=True), + 'metadata': fields.DictOfStringsField(nullable=True), + 'multiattach': fields.BooleanField(nullable=True), + 'project_id': fields.StringField(nullable=True), + 'qos_specs': fields.DictOfStringsField(nullable=True), + 'replication_status': fields.StringField(nullable=True), + 'reservations': fields.ListOfStringsField(nullable=True), + 'size': fields.IntegerField(nullable=True), + 'snapshot_id': fields.UUIDField(nullable=True), + 'source_replicaid': fields.UUIDField(nullable=True), + 'source_volid': fields.UUIDField(nullable=True), + 'status': fields.StringField(nullable=True), + 'user_id': fields.StringField(nullable=True), + 'volume_type_id': fields.UUIDField(nullable=True), + } diff --git a/cinder/objects/volume_type.py b/cinder/objects/volume_type.py index 5f8debb8d08..3a017703406 100644 --- a/cinder/objects/volume_type.py +++ b/cinder/objects/volume_type.py @@ -65,7 +65,7 @@ class VolumeType(base.CinderPersistentObject, base.CinderObject, @staticmethod def _from_db_object(context, type, db_type, expected_attrs=None): if expected_attrs is None: - expected_attrs = [] + expected_attrs = ['extra_specs', 'projects'] for name, field in type.fields.items(): if name in OPTIONAL_FIELDS: continue diff --git a/cinder/scheduler/filter_scheduler.py b/cinder/scheduler/filter_scheduler.py index 5db592c6c88..e87c2f67d5b 100644 --- a/cinder/scheduler/filter_scheduler.py +++ b/cinder/scheduler/filter_scheduler.py @@ -22,6 +22,7 @@ Weighing Functions. from oslo_config import cfg from oslo_log import log as logging +from oslo_serialization import jsonutils from cinder import exception from cinder.i18n import _, _LE, _LW @@ -257,27 +258,26 @@ class FilterScheduler(driver.Scheduler): """ elevated = context.elevated() - volume_properties = request_spec['volume_properties'] # Since Cinder is using mixed filters from Oslo and it's own, which # takes 'resource_XX' and 'volume_XX' as input respectively, copying # 'volume_XX' to 'resource_XX' will make both filters happy. - resource_properties = volume_properties.copy() - volume_type = request_spec.get("volume_type", None) - resource_type = request_spec.get("volume_type", None) - request_spec.update({'resource_properties': resource_properties}) + volume_type = resource_type = request_spec.get("volume_type") config_options = self._get_configuration_options() if filter_properties is None: filter_properties = {} - self._populate_retry(filter_properties, resource_properties) + self._populate_retry(filter_properties, + request_spec['volume_properties']) if resource_type is None: msg = _("volume_type cannot be None") raise exception.InvalidVolumeType(reason=msg) + request_spec_dict = jsonutils.to_primitive(request_spec) + filter_properties.update({'context': context, - 'request_spec': request_spec, + 'request_spec': request_spec_dict, 'config_options': config_options, 'volume_type': volume_type, 'resource_type': resource_type}) @@ -288,7 +288,8 @@ class FilterScheduler(driver.Scheduler): # If multiattach is enabled on a volume, we need to add # multiattach to extra specs, so that the capability # filtering is enabled. 
- multiattach = volume_properties.get('multiattach', False) + multiattach = request_spec['volume_properties'].get('multiattach', + False) if multiattach and 'multiattach' not in resource_type.get( 'extra_specs', {}): if 'extra_specs' not in resource_type: diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py index ae0fdece412..6e8f7bb03fc 100644 --- a/cinder/scheduler/manager.py +++ b/cinder/scheduler/manager.py @@ -136,6 +136,11 @@ class SchedulerManager(manager.Manager): # volume by its volume_id. volume = objects.Volume.get_by_id(context, volume_id) + # FIXME(dulek): Remove this in v3.0 of RPC API. + if isinstance(request_spec, dict): + # We may receive request_spec as dict from older clients. + request_spec = objects.RequestSpec.from_primitives(request_spec) + try: flow_engine = create_volume.get_flow(context, db, self.driver, diff --git a/cinder/scheduler/rpcapi.py b/cinder/scheduler/rpcapi.py index 35020b05a98..7f01fc77933 100644 --- a/cinder/scheduler/rpcapi.py +++ b/cinder/scheduler/rpcapi.py @@ -53,9 +53,10 @@ class SchedulerAPI(rpc.RPCAPI): 2.0 - Remove 1.x compatibility 2.1 - Adds support for sending objects over RPC in manage_existing() + 2.2 - Sends request_spec as object in create_volume() """ - RPC_API_VERSION = '2.1' + RPC_API_VERSION = '2.2' TOPIC = CONF.scheduler_topic BINARY = 'cinder-scheduler' @@ -87,7 +88,11 @@ class SchedulerAPI(rpc.RPCAPI): 'snapshot_id': snapshot_id, 'image_id': image_id, 'request_spec': request_spec_p, 'filter_properties': filter_properties, 'volume': volume} - version = '2.0' + version = '2.2' + if not self.client.can_send_version('2.2'): + # Send request_spec as dict + version = '2.0' + msg_args['request_spec'] = jsonutils.to_primitive(request_spec) cctxt = self.client.prepare(version=version) return cctxt.cast(ctxt, 'create_volume', **msg_args) diff --git a/cinder/tests/unit/objects/test_objects.py b/cinder/tests/unit/objects/test_objects.py index 6441db188a4..ccd20a5c58c 100644 --- a/cinder/tests/unit/objects/test_objects.py +++ b/cinder/tests/unit/objects/test_objects.py @@ -34,6 +34,7 @@ object_data = { 'ConsistencyGroupList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'QualityOfServiceSpecs': '1.0-0b212e0a86ee99092229874e03207fe8', 'QualityOfServiceSpecsList': '1.0-1b54e51ad0fc1f3a8878f5010e7e16dc', + 'RequestSpec': '1.0-42685a616bd27c2a4d75cba93a81ed8c', 'Service': '1.4-c7d011989d1718ca0496ccf640b42712', 'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'Snapshot': '1.1-37966f7141646eb29e9ad5298ff2ca8a', @@ -42,6 +43,7 @@ object_data = { 'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'VolumeAttachment': '1.0-b30dacf62b2030dd83d8a1603f1064ff', 'VolumeAttachmentList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', + 'VolumeProperties': '1.0-42f00cf1f6c657377a3e2a7efbed0bca', 'VolumeType': '1.2-02ecb0baac87528d041f4ddd95b95579', 'VolumeTypeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', } @@ -90,7 +92,7 @@ class TestObjectVersions(test.TestCase): classes = base.CinderObjectRegistry.obj_classes() for name, cls in classes.items(): - if not issubclass(cls[0], base.ObjectListBase): + if issubclass(cls[0], base.CinderPersistentObject): db_model = db.get_model_for_versioned_object(cls[0]) _check_table_matched(db_model, cls[0]) diff --git a/cinder/tests/unit/scheduler/test_filter_scheduler.py b/cinder/tests/unit/scheduler/test_filter_scheduler.py index b1c896d0925..d92193065fb 100644 --- a/cinder/tests/unit/scheduler/test_filter_scheduler.py +++ b/cinder/tests/unit/scheduler/test_filter_scheduler.py @@ -20,8 +20,10 @@ import mock 
from cinder import context from cinder import exception +from cinder import objects from cinder.scheduler import filter_scheduler from cinder.scheduler import host_manager +from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.scheduler import fakes from cinder.tests.unit.scheduler import test_scheduler from cinder.volume import utils @@ -114,7 +116,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_id': ['fake-id1']} + 'volume_id': fake.VOLUME_ID} + request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, fake_context, request_spec, {}) @@ -127,6 +130,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}} + request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, fake_context, @@ -141,7 +145,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): # request_spec is missing 'volume_type' request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, - 'volume_id': ['fake-id1']} + 'volume_id': fake.VOLUME_ID} + request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.InvalidVolumeType, sched.schedule_create_volume, fake_context, @@ -169,7 +174,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, - 'volume_id': ['fake-id1']} + 'volume_id': fake.VOLUME_ID} + request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, fake_context, request_spec, {}) self.assertTrue(self.was_admin) @@ -188,6 +194,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) weighed_host = sched._schedule(fake_context, request_spec, {}) self.assertIsNotNone(weighed_host.obj) self.assertTrue(_mock_service_get_all.called) @@ -205,6 +212,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, 'CG_backend': 'host@lvmdriver'} + request_spec = objects.RequestSpec.from_primitives(request_spec) weighed_host = sched._schedule(fake_context, request_spec, {}) self.assertIsNone(weighed_host) @@ -220,6 +228,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, 'CG_backend': 'host1'} + request_spec = objects.RequestSpec.from_primitives(request_spec) weighed_host = sched._schedule(fake_context, request_spec, {}) self.assertEqual('host1#lvm1', weighed_host.obj.host) @@ -243,6 +252,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) filter_properties = {} sched._schedule(self.context, request_spec, @@ -259,6 +269,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': 
{'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) filter_properties = {} sched._schedule(self.context, request_spec, @@ -275,6 +286,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) retry = dict(num_attempts=1) filter_properties = dict(retry=retry) @@ -293,6 +305,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) retry = dict(num_attempts=2) filter_properties = dict(retry=retry) @@ -343,10 +356,11 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): """Do a successful pass through of with host_passes_filters().""" sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) ret_host = sched.host_passes_filters(ctx, 'host1#lvm1', request_spec, {}) self.assertEqual('host1', utils.extract_host(ret_host.host)) @@ -358,10 +372,11 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): """Do a successful pass through of with host_passes_filters().""" sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} + request_spec = objects.RequestSpec.from_primitives(request_spec) ret_host = sched.host_passes_filters(ctx, 'host5#_pool0', request_spec, {}) self.assertEqual('host5', utils.extract_host(ret_host.host)) @@ -372,10 +387,11 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): """Fail the host due to insufficient capacity.""" sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1024}} + request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidHost, sched.host_passes_filters, ctx, 'host1#lvm1', request_spec, {}) @@ -390,12 +406,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm4'} - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host4#lvm4'}} + request_spec = objects.RequestSpec.from_primitives(request_spec) host_state = sched.find_retype_host(ctx, request_spec, filter_properties={}, migration_policy='never') @@ -411,12 +428,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm3'} - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 
200, 'host': 'host3#lvm3'}} + request_spec = objects.RequestSpec.from_primitives(request_spec) host_state = sched.find_retype_host(ctx, request_spec, filter_properties={}, migration_policy='never') @@ -429,12 +447,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm1'} - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host4'}} + request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx, request_spec, filter_properties={}, migration_policy='never') @@ -446,12 +465,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm1'} - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host4'}} + request_spec = objects.RequestSpec.from_primitives(request_spec) host_state = sched.find_retype_host(ctx, request_spec, filter_properties={}, migration_policy='on-demand') @@ -464,12 +484,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): sched, ctx = self._host_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm1'} - request_spec = {'volume_id': 1, + request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 2048, 'host': 'host4'}} + request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx, request_spec, filter_properties={}, migration_policy='on-demand') diff --git a/cinder/tests/unit/scheduler/test_rpcapi.py b/cinder/tests/unit/scheduler/test_rpcapi.py index 9a0159728ad..bae2979942c 100644 --- a/cinder/tests/unit/scheduler/test_rpcapi.py +++ b/cinder/tests/unit/scheduler/test_rpcapi.py @@ -86,7 +86,23 @@ class SchedulerRpcAPITestCase(test.TestCase): fanout=True, version='2.0') - def test_create_volume(self): + @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) + def test_create_volume(self, can_send_version): + self._test_scheduler_api('create_volume', + rpc_method='cast', + topic='topic', + volume_id='volume_id', + snapshot_id='snapshot_id', + image_id='image_id', + request_spec='fake_request_spec', + filter_properties='filter_properties', + volume='volume', + version='2.2') + can_send_version.assert_has_calls([mock.call('2.2')]) + + @mock.patch('oslo_messaging.RPCClient.can_send_version', + return_value=False) + def test_create_volume_serialization(self, can_send_version): self._test_scheduler_api('create_volume', rpc_method='cast', topic='topic', @@ -97,6 +113,7 @@ class SchedulerRpcAPITestCase(test.TestCase): filter_properties='filter_properties', volume='volume', version='2.0') + can_send_version.assert_has_calls([mock.call('2.2')]) def test_migrate_volume_to_host(self): self._test_scheduler_api('migrate_volume_to_host', diff --git a/cinder/tests/unit/scheduler/test_scheduler.py b/cinder/tests/unit/scheduler/test_scheduler.py index e1f5d13708f..b5c5cf5807d 100644 --- 
a/cinder/tests/unit/scheduler/test_scheduler.py +++ b/cinder/tests/unit/scheduler/test_scheduler.py @@ -130,7 +130,11 @@ class SchedulerManagerTestCase(test.TestCase): _mock_sched_create.side_effect = exception.NoValidHost(reason="") volume = fake_volume.fake_volume_obj(self.context) topic = 'fake_topic' - request_spec = {'volume_id': volume.id} + request_spec = {'volume_id': volume.id, + 'volume': {'id': volume.id, '_name_id': None, + 'metadata': {}, 'admin_metadata': {}, + 'glance_metadata': {}}} + request_spec_obj = objects.RequestSpec.from_primitives(request_spec) self.manager.create_volume(self.context, topic, volume.id, request_spec=request_spec, @@ -139,8 +143,8 @@ class SchedulerManagerTestCase(test.TestCase): _mock_volume_update.assert_called_once_with(self.context, volume.id, {'status': 'error'}) - _mock_sched_create.assert_called_once_with(self.context, request_spec, - {}) + _mock_sched_create.assert_called_once_with(self.context, + request_spec_obj, {}) _mock_message_create.assert_called_once_with( self.context, defined_messages.UNABLE_TO_ALLOCATE, @@ -154,13 +158,14 @@ class SchedulerManagerTestCase(test.TestCase): topic = 'fake_topic' request_spec = {'volume_id': volume.id} + request_spec_obj = objects.RequestSpec.from_primitives(request_spec) self.manager.create_volume(self.context, topic, volume.id, request_spec=request_spec, filter_properties={}, volume=volume) - _mock_sched_create.assert_called_once_with(self.context, request_spec, - {}) + _mock_sched_create.assert_called_once_with(self.context, + request_spec_obj, {}) self.assertFalse(_mock_sleep.called) @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') @@ -174,6 +179,7 @@ class SchedulerManagerTestCase(test.TestCase): topic = 'fake_topic' request_spec = {'volume_id': volume.id} + request_spec_obj = objects.RequestSpec.from_primitives(request_spec) _mock_is_ready.side_effect = [False, False, True] @@ -181,8 +187,8 @@ class SchedulerManagerTestCase(test.TestCase): request_spec=request_spec, filter_properties={}, volume=volume) - _mock_sched_create.assert_called_once_with(self.context, request_spec, - {}) + _mock_sched_create.assert_called_once_with(self.context, + request_spec_obj, {}) calls = [mock.call(1)] * 2 _mock_sleep.assert_has_calls(calls) self.assertEqual(2, _mock_sleep.call_count) @@ -198,6 +204,7 @@ class SchedulerManagerTestCase(test.TestCase): topic = 'fake_topic' request_spec = {'volume_id': volume.id} + request_spec_obj = objects.RequestSpec.from_primitives(request_spec) _mock_is_ready.return_value = True @@ -205,8 +212,8 @@ class SchedulerManagerTestCase(test.TestCase): request_spec=request_spec, filter_properties={}, volume=volume) - _mock_sched_create.assert_called_once_with(self.context, request_spec, - {}) + _mock_sched_create.assert_called_once_with(self.context, + request_spec_obj, {}) self.assertFalse(_mock_sleep.called) @mock.patch('cinder.db.volume_get') diff --git a/cinder/tests/unit/test_volume.py b/cinder/tests/unit/test_volume.py index 5f8460a7784..4d50a3f2795 100644 --- a/cinder/tests/unit/test_volume.py +++ b/cinder/tests/unit/test_volume.py @@ -127,7 +127,7 @@ class FakeImageService(object): class BaseVolumeTestCase(test.TestCase): """Test Case for volumes.""" - FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa' + FAKE_UUID = fake.IMAGE_ID def setUp(self): super(BaseVolumeTestCase, self).setUp() @@ -1082,12 +1082,14 @@ class VolumeTestCase(BaseVolumeTestCase): @mock.patch.object(keymgr, 'API', new=fake_keymgr.fake_api) def 
test_create_delete_volume_with_encrypted_volume_type(self):
-        db_vol_type = db.volume_type_create(
-            self.context, {'id': fake.VOLUME_TYPE_ID, 'name': 'LUKS'})
+        db.volume_type_create(self.context,
+                              {'id': fake.VOLUME_TYPE_ID, 'name': 'LUKS'})
         db.volume_type_encryption_create(
             self.context, fake.VOLUME_TYPE_ID,
             {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
+        db_vol_type = db.volume_type_get_by_name(self.context, 'LUKS')
+
         volume = self.volume_api.create(self.context,
                                         1,
                                         'name',
@@ -3671,7 +3673,7 @@ class VolumeTestCase(BaseVolumeTestCase):
         volume_api = cinder.volume.api.API(
             image_service=FakeImageService())
         volume = volume_api.create(self.context, 2, 'name', 'description',
-                                   image_id=1)
+                                   image_id=self.FAKE_UUID)
         volume_id = volume['id']

         self.assertEqual('creating', volume['status'])
diff --git a/cinder/tests/unit/test_volume_rpcapi.py b/cinder/tests/unit/test_volume_rpcapi.py
index 2cdcbaf65f5..203c331259c 100644
--- a/cinder/tests/unit/test_volume_rpcapi.py
+++ b/cinder/tests/unit/test_volume_rpcapi.py
@@ -245,7 +245,8 @@ class VolumeRpcAPITestCase(test.TestCase):
         self._test_volume_api('delete_cgsnapshot', rpc_method='cast',
                               cgsnapshot=self.fake_cgsnap, version='2.0')

-    def test_create_volume(self):
+    @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True)
+    def test_create_volume(self, can_send_version):
         self._test_volume_api('create_volume',
                               rpc_method='cast',
                               volume=self.fake_volume_obj,
@@ -253,9 +254,12 @@ class VolumeRpcAPITestCase(test.TestCase):
                               request_spec='fake_request_spec',
                               filter_properties='fake_properties',
                               allow_reschedule=True,
-                              version='2.0')
+                              version='2.4')
+        can_send_version.assert_has_calls([mock.call('2.4')])

-    def test_create_volume_serialization(self):
+    @mock.patch('oslo_messaging.RPCClient.can_send_version',
+                return_value=False)
+    def test_create_volume_serialization(self, can_send_version):
         request_spec = {"metadata": self.fake_volume_metadata}
         self._test_volume_api('create_volume',
                               rpc_method='cast',
@@ -265,6 +269,7 @@ class VolumeRpcAPITestCase(test.TestCase):
                               filter_properties='fake_properties',
                               allow_reschedule=True,
                               version='2.0')
+        can_send_version.assert_has_calls([mock.call('2.4')])

     def test_delete_volume(self):
         self._test_volume_api('delete_volume',
diff --git a/cinder/volume/flows/api/create_volume.py b/cinder/volume/flows/api/create_volume.py
index bc5f6541ef8..89dbbd645ed 100644
--- a/cinder/volume/flows/api/create_volume.py
+++ b/cinder/volume/flows/api/create_volume.py
@@ -506,6 +506,16 @@ class EntryCreateTask(flow_utils.CinderTask):
         volume = objects.Volume(context=context, **volume_properties)
         volume.create()

+        # FIXME(dulek): We're passing this volume_properties dict through RPC
+        # in request_spec, which shouldn't be needed - most of this data is
+        # already replicated in the volume object and elsewhere. In Newton we
+        # should read it from a single place and keep only compat code here.
+        #
+        # Right now - let's move it to versioned objects to be able to make
+        # non-backward-compatible changes.
+
+        volume_properties = objects.VolumeProperties(**volume_properties)
+
         return {
             'volume_id': volume['id'],
             'volume_properties': volume_properties,
@@ -687,10 +697,6 @@ class VolumeCastTask(flow_utils.CinderTask):
             # If cgroup_id existed, we should cast volume to the scheduler
             # to choose a proper pool whose backend is same as CG's backend.
             cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id)
-            # FIXME(wanghao): CG_backend got added before request_spec was
-            # converted to versioned objects. 
We should make sure that this - # will be handled by object version translations once we add - # RequestSpec object. request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host) elif snapshot_id and CONF.snapshot_same_host: # NOTE(Rongze Zhu): A simple solution for bug 1008866. @@ -741,7 +747,13 @@ class VolumeCastTask(flow_utils.CinderTask): def execute(self, context, **kwargs): scheduler_hints = kwargs.pop('scheduler_hints', None) - request_spec = kwargs.copy() + db_vt = kwargs.pop('volume_type') + kwargs['volume_type'] = None + if db_vt: + kwargs['volume_type'] = objects.VolumeType() + objects.VolumeType()._from_db_object(context, + kwargs['volume_type'], db_vt) + request_spec = objects.RequestSpec(**kwargs) filter_properties = {} if scheduler_hints: filter_properties['scheduler_hints'] = scheduler_hints diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index 3cc3de4b50b..bd336fd8c41 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -526,12 +526,17 @@ class VolumeManager(manager.SchedulerDependentManager): # by its volume_id. volume = objects.Volume.get_by_id(context, volume_id) + # FIXME(dulek): Remove this in v3.0 of RPC API. + if isinstance(request_spec, dict): + # We may receive request_spec as dict from older clients. + request_spec = objects.RequestSpec.from_primitives(request_spec) + context_elevated = context.elevated() if filter_properties is None: filter_properties = {} if request_spec is None: - request_spec = {} + request_spec = objects.RequestSpec() try: # NOTE(flaper87): Driver initialization is diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py index 6b81bb748d0..5af938e58b5 100644 --- a/cinder/volume/rpcapi.py +++ b/cinder/volume/rpcapi.py @@ -103,9 +103,10 @@ class VolumeAPI(rpc.RPCAPI): 2.2 - Adds support for sending objects over RPC in manage_existing(). 2.3 - Adds support for sending objects over RPC in initialize_connection(). + 2.4 - Sends request_spec as object in create_volume(). 
""" - RPC_API_VERSION = '2.3' + RPC_API_VERSION = '2.4' TOPIC = CONF.volume_topic BINARY = 'cinder-volume' @@ -156,12 +157,19 @@ class VolumeAPI(rpc.RPCAPI): def create_volume(self, ctxt, volume, host, request_spec, filter_properties, allow_reschedule=True): - request_spec_p = jsonutils.to_primitive(request_spec) - cctxt = self._get_cctxt(host, '2.0') - cctxt.cast(ctxt, 'create_volume', volume_id=volume.id, - request_spec=request_spec_p, - filter_properties=filter_properties, - allow_reschedule=allow_reschedule, volume=volume) + msg_args = {'volume_id': volume.id, 'request_spec': request_spec, + 'filter_properties': filter_properties, + 'allow_reschedule': allow_reschedule, + 'volume': volume, + } + version = '2.4' + if not self.client.can_send_version('2.4'): + # Send request_spec as dict + version = '2.0' + msg_args['request_spec'] = jsonutils.to_primitive(request_spec) + + cctxt = self._get_cctxt(host, version) + cctxt.cast(ctxt, 'create_volume', **msg_args) def delete_volume(self, ctxt, volume, unmanage_only=False, cascade=False): cctxt = self._get_cctxt(volume.host, '2.0') diff --git a/tools/lintstack.py b/tools/lintstack.py index 385cf46db07..66cd66b5c22 100755 --- a/tools/lintstack.py +++ b/tools/lintstack.py @@ -89,12 +89,14 @@ objects_ignore_messages = [ "Module 'cinder.objects' has no 'ConsistencyGroupList' member", "Module 'cinder.objects' has no 'QualityOfServiceSpecs' member", "Module 'cinder.objects' has no 'QualityOfServiceSpecsList' member", + "Module 'cinder.objects' has no 'RequestSpec' member", "Module 'cinder.objects' has no 'Service' member", "Module 'cinder.objects' has no 'ServiceList' member", "Module 'cinder.objects' has no 'Snapshot' member", "Module 'cinder.objects' has no 'SnapshotList' member", "Module 'cinder.objects' has no 'Volume' member", "Module 'cinder.objects' has no 'VolumeList' member", + "Module 'cinder.objects' has no 'VolumeProperties' member", "Module 'cinder.objects' has no 'VolumeType' member", "Module 'cinder.objects' has no 'VolumeTypeList' member", ]