diff --git a/cinder/brick/local_dev/lvm.py b/cinder/brick/local_dev/lvm.py index b830dd70280..67f8e19907f 100644 --- a/cinder/brick/local_dev/lvm.py +++ b/cinder/brick/local_dev/lvm.py @@ -604,10 +604,12 @@ class LVM(executor.Executor): LOG.error(_LE('StdErr :%s'), err.stderr) raise - def activate_lv(self, name, is_snapshot=False): + def activate_lv(self, name, is_snapshot=False, permanent=False): """Ensure that logical volume/snapshot logical volume is activated. :param name: Name of LV to activate + :param is_snapshot: whether LV is a snapshot + :param permanent: whether we should drop skipactivation flag :raises: putils.ProcessExecutionError """ @@ -626,6 +628,10 @@ class LVM(executor.Executor): if self.supports_lvchange_ignoreskipactivation: cmd.append('-K') + # If permanent=True is specified, drop the skipactivation flag in + # order to make this LV automatically activated after next reboot. + if permanent: + cmd += ['-k', 'n'] cmd.append(lv_path) diff --git a/cinder/image/glance.py b/cinder/image/glance.py index f013efe2271..0a2250d2934 100644 --- a/cinder/image/glance.py +++ b/cinder/image/glance.py @@ -270,6 +270,30 @@ class GlanceImageService(object): return (getattr(image_meta, 'direct_url', None), getattr(image_meta, 'locations', None)) + def add_location(self, context, image_id, url, metadata): + """Add a backend location url to an image. + + Returns a dict containing image metadata on success. 
+ """ + if CONF.glance_api_version != 2: + raise exception.Invalid("Image API version 2 is disabled.") + client = GlanceClientWrapper(version=2) + try: + return client.call(context, 'add_location', + image_id, url, metadata) + except Exception: + _reraise_translated_image_exception(image_id) + + def delete_locations(self, context, image_id, url_set): + """Delete backend location urls from an image.""" + if CONF.glance_api_version != 2: + raise exception.Invalid("Image API version 2 is disabled.") + client = GlanceClientWrapper(version=2) + try: + return client.call(context, 'delete_locations', image_id, url_set) + except Exception: + _reraise_translated_image_exception(image_id) + def download(self, context, image_id, data=None): """Calls out to Glance for data and writes data.""" if data and 'file' in CONF.allowed_direct_url_schemes: diff --git a/cinder/tests/unit/brick/fake_lvm.py b/cinder/tests/unit/brick/fake_lvm.py index 61f28ec01d8..8474b9c93a9 100644 --- a/cinder/tests/unit/brick/fake_lvm.py +++ b/cinder/tests/unit/brick/fake_lvm.py @@ -55,7 +55,7 @@ class FakeBrickLVM(object): def lv_has_snapshot(self, name): return False - def activate_lv(self, lv, is_snapshot=False): + def activate_lv(self, lv, is_snapshot=False, permanent=False): pass def rename_volume(self, lv_name, new_name): diff --git a/cinder/tests/unit/image/fake.py b/cinder/tests/unit/image/fake.py index 5214e09e838..cb38e470931 100644 --- a/cinder/tests/unit/image/fake.py +++ b/cinder/tests/unit/image/fake.py @@ -210,6 +210,12 @@ class _FakeImageService(object): return 'fake_location' return None + def add_location(self, context, image_id, url, metadata): + self.update(context, image_id, {'locations': [{'url': url, + 'metadata': metadata}]}) + return True + + _fakeImageService = _FakeImageService() diff --git a/cinder/tests/unit/test_volume.py b/cinder/tests/unit/test_volume.py index 256c272090b..45f2bb8205e 100644 --- a/cinder/tests/unit/test_volume.py +++ b/cinder/tests/unit/test_volume.py @@ 
-3381,8 +3381,12 @@ class VolumeTestCase(BaseVolumeTestCase): self.context, volume_id) - def _create_volume_from_image(self, fakeout_copy_image_to_volume=False, - fakeout_clone_image=False): + @mock.patch('cinder.volume.flows.manager.create_volume.' + 'CreateVolumeFromSpecTask._clone_image_volume') + def _create_volume_from_image(self, mock_clone_image_volume, + fakeout_copy_image_to_volume=False, + fakeout_clone_image=False, + clone_image_volume=False): """Test function of create_volume_from_image. Test cases call this function to create a volume from image, caller @@ -3414,6 +3418,7 @@ class VolumeTestCase(BaseVolumeTestCase): if fakeout_copy_image_to_volume: self.stubs.Set(self.volume, '_copy_image_to_volume', fake_copy_image_to_volume) + mock_clone_image_volume.return_value = ({}, clone_image_volume) image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' volume_id = tests_utils.create_volume(self.context, @@ -3508,6 +3513,17 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertDictEqual(self.volume.stats['pools'], {'_pool0': {'allocated_capacity_gb': 1}}) + def test_create_volume_from_image_clone_image_volume(self): + """Test create volume from image via image volume. + + Verify that after cloning image to volume, it is in available + state and is bootable. + """ + volume = self._create_volume_from_image(clone_image_volume=True) + self.assertEqual('available', volume['status']) + self.assertTrue(volume['bootable']) + self.volume.delete_volume(self.context, volume['id']) + def test_create_volume_from_exact_sized_image(self): """Test create volume from an image of the same size. 
@@ -5491,6 +5507,61 @@ class CopyVolumeToImageTestCase(BaseVolumeTestCase): self.context, saving_image_id) + @mock.patch.object(QUOTAS, 'reserve') + @mock.patch.object(QUOTAS, 'commit') + @mock.patch.object(vol_manager.VolumeManager, 'create_volume') + @mock.patch.object(fake_driver.FakeISCSIDriver, 'copy_volume_to_image') + def _test_copy_volume_to_image_with_image_volume( + self, mock_copy, mock_create, mock_quota_commit, + mock_quota_reserve): + self.flags(glance_api_version=2) + self.volume.driver.configuration.image_upload_use_cinder_backend = True + image_service = fake_image.FakeImageService() + image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2' + self.image_meta['id'] = image_id + self.image_meta['status'] = 'queued' + image_service.create(self.context, self.image_meta) + + # creating volume testdata + self.volume_attrs['instance_uuid'] = None + db.volume_create(self.context, self.volume_attrs) + + def fake_create(context, volume_id, **kwargs): + db.volume_update(context, volume_id, {'status': 'available'}) + + mock_create.side_effect = fake_create + + # start test + self.volume.copy_volume_to_image(self.context, + self.volume_id, + self.image_meta) + + volume = db.volume_get(self.context, self.volume_id) + self.assertEqual('available', volume['status']) + + # return create image + image = image_service.show(self.context, image_id) + image_service.delete(self.context, image_id) + return image + + def test_copy_volume_to_image_with_image_volume(self): + image = self._test_copy_volume_to_image_with_image_volume() + self.assertTrue(image['locations'][0]['url'].startswith('cinder://')) + + def test_copy_volume_to_image_with_image_volume_qcow2(self): + self.image_meta['disk_format'] = 'qcow2' + image = self._test_copy_volume_to_image_with_image_volume() + self.assertIsNone(image.get('locations')) + + @mock.patch.object(vol_manager.VolumeManager, 'delete_volume') + @mock.patch.object(fake_image._FakeImageService, 'add_location', + side_effect=exception.Invalid) + 
def test_copy_volume_to_image_with_image_volume_failure( + self, mock_add_location, mock_delete): + image = self._test_copy_volume_to_image_with_image_volume() + self.assertIsNone(image.get('locations')) + self.assertTrue(mock_delete.called) + class GetActiveByWindowTestCase(BaseVolumeTestCase): def setUp(self): @@ -6574,6 +6645,24 @@ class LVMVolumeDriverTestCase(DriverTestCase): self.assertEqual('default', lvm_driver.configuration.lvm_type) + @mock.patch.object(lvm.LVMISCSIDriver, 'extend_volume') + def test_create_cloned_volume_by_thin_snapshot(self, mock_extend): + self.configuration.lvm_type = 'thin' + fake_vg = mock.Mock(fake_lvm.FakeBrickLVM('cinder-volumes', False, + None, 'default')) + lvm_driver = lvm.LVMISCSIDriver(configuration=self.configuration, + vg_obj=fake_vg, + db=db) + fake_volume = tests_utils.create_volume(self.context, size=1) + fake_new_volume = tests_utils.create_volume(self.context, size=2) + + lvm_driver.create_cloned_volume(fake_new_volume, fake_volume) + fake_vg.create_lv_snapshot.assert_called_once_with( + fake_new_volume['name'], fake_volume['name'], 'thin') + mock_extend.assert_called_once_with(fake_new_volume, 2) + fake_vg.activate_lv.assert_called_once_with( + fake_new_volume['name'], is_snapshot=True, permanent=True) + class ISCSITestCase(DriverTestCase): """Test Case for ISCSIDriver""" diff --git a/cinder/tests/unit/volume/flows/test_create_volume_flow.py b/cinder/tests/unit/volume/flows/test_create_volume_flow.py index ada3891368d..d5c447b90d5 100644 --- a/cinder/tests/unit/volume/flows/test_create_volume_flow.py +++ b/cinder/tests/unit/volume/flows/test_create_volume_flow.py @@ -223,3 +223,60 @@ class CreateVolumeFlowManagerTestCase(test.TestCase): volume, snapshot_obj.id) fake_driver.create_volume_from_snapshot.assert_called_once_with( volume, snapshot_obj) + + +class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase): + + def setUp(self): + super(CreateVolumeFlowManagerGlanceCinderBackendCase, self).setUp() + 
self.ctxt = context.get_admin_context() + + @mock.patch('cinder.volume.flows.manager.create_volume.' + 'CreateVolumeFromSpecTask.' + '_handle_bootable_volume_glance_meta') + def test_create_from_image_volume(self, handle_bootable, format='raw', + owner=None, location=True): + self.flags(allowed_direct_url_schemes=['cinder']) + fake_db = mock.MagicMock() + fake_driver = mock.MagicMock() + fake_manager = create_volume_manager.CreateVolumeFromSpecTask( + fake_db, fake_driver) + fake_image_service = mock.MagicMock() + volume = fake_volume.fake_volume_obj(self.ctxt) + image_volume = fake_volume.fake_volume_obj(self.ctxt, + volume_metadata={}) + image_id = '34e54c31-3bc8-5c1d-9fff-2225bcce4b59' + url = 'cinder://%s' % image_volume['id'] + image_location = None + if location: + image_location = (url, [{'url': url, 'metadata': {}}]) + image_meta = {'id': image_id, + 'container_format': 'bare', + 'disk_format': format, + 'owner': owner or self.ctxt.project_id} + fake_driver.clone_image.return_value = (None, False) + fake_db.volume_get_all_by_host.return_value = [image_volume] + + fake_manager._create_from_image(self.ctxt, + volume, + image_location, + image_id, + image_meta, + fake_image_service) + if format == 'raw' and not owner and location: + fake_driver.create_cloned_volume.assert_called_once_with( + volume, image_volume) + handle_bootable.assert_called_once_with(self.ctxt, volume['id'], + image_id=image_id, + image_meta=image_meta) + else: + self.assertFalse(fake_driver.create_cloned_volume.called) + + def test_create_from_image_volume_in_qcow2_format(self): + self.test_create_from_image_volume(format='qcow2') + + def test_create_from_image_volume_of_other_owner(self): + self.test_create_from_image_volume(owner='fake-owner') + + def test_create_from_image_volume_without_location(self): + self.test_create_from_image_volume(location=False) diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py index 7f6c8646e2a..9dd83fb4b63 100644 --- a/cinder/volume/driver.py 
+++ b/cinder/volume/driver.py @@ -234,7 +234,21 @@ volume_opts = [ "is: {'key-1'='val1' 'key-2'='val2'...},{...} " "and for managed devices its simply a list of valid " "configured backend_names that the driver supports " - "replicating to: backend-a,bakcend-b...") + "replicating to: backend-a,bakcend-b..."), + cfg.BoolOpt('image_upload_use_cinder_backend', + default=False, + help='If set to True, upload-to-image in raw format will ' + 'create a cloned volume and register its location to ' + 'the image service, instead of uploading the volume ' + 'content. The cinder backend and locations support ' + 'must be enabled in the image service, and ' + 'glance_api_version must be set to 2.'), + cfg.BoolOpt('image_upload_use_internal_tenant', + default=False, + help='If set to True, the image volume created by ' + 'upload-to-image will be placed in the internal tenant. ' + 'Otherwise, the image volume is created in the current ' + 'context\'s tenant.'), ] # for backward compatibility diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py index f0bededc455..d1e2f50589f 100644 --- a/cinder/volume/drivers/lvm.py +++ b/cinder/volume/drivers/lvm.py @@ -449,6 +449,16 @@ class LVMVolumeDriver(driver.VolumeDriver): def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" + if self.configuration.lvm_type == 'thin': + self.vg.create_lv_snapshot(volume['name'], + src_vref['name'], + self.configuration.lvm_type) + if volume['size'] > src_vref['size']: + LOG.debug("Resize the new volume to %s.", volume['size']) + self.extend_volume(volume, volume['size']) + self.vg.activate_lv(volume['name'], is_snapshot=True, + permanent=True) + return mirror_count = 0 if self.configuration.lvm_mirrors: diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py index 6f9346a5b98..9abef9f43d1 100644 --- a/cinder/volume/flows/manager/create_volume.py +++ b/cinder/volume/flows/manager/create_volume.py 
@@ -570,6 +570,59 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): self.db.volume_glance_metadata_bulk_create(context, volume_id, volume_metadata) + def _clone_image_volume(self, context, volume, image_location, image_meta): + """Create a volume efficiently from an existing image. + + Returns a dict of volume properties eg. provider_location, + boolean indicating whether cloning occurred + """ + if not image_location: + return None, False + + if (image_meta.get('container_format') != 'bare' or + image_meta.get('disk_format') != 'raw'): + LOG.info(_LI("Requested image %(id)s is not in raw format."), + {'id': image_meta.get('id')}) + return None, False + + image_volume = None + direct_url, locations = image_location + urls = set([direct_url] + [loc.get('url') for loc in locations or []]) + image_volume_ids = [url[9:] for url in urls + if url and url.startswith('cinder://')] + image_volumes = self.db.volume_get_all_by_host( + context, volume['host'], filters={'id': image_volume_ids}) + + for image_volume in image_volumes: + # For the case image volume is stored in the service tenant, + # image_owner volume metadata should also be checked. 
+ image_owner = None + volume_metadata = image_volume.get('volume_metadata') or {} + for m in volume_metadata: + if m['key'] == 'image_owner': + image_owner = m['value'] + if (image_meta['owner'] != volume['project_id'] and + image_meta['owner'] != image_owner): + LOG.info(_LI("Skipping image volume %(id)s because " + "it is not accessible by current Tenant."), + {'id': image_volume.id}) + continue + + LOG.info(_LI("Will clone a volume from the image volume " + "%(id)s."), {'id': image_volume.id}) + break + else: + LOG.debug("No accessible image volume for image %(id)s found.", + {'id': image_meta['id']}) + return None, False + + try: + return self.driver.create_cloned_volume(volume, image_volume), True + except (NotImplementedError, exception.CinderException): + LOG.exception(_LE('Failed to clone image volume %(id)s.'), + {'id': image_volume['id']}) + return None, False + def _create_from_image(self, context, volume_ref, image_location, image_id, image_meta, image_service, **kwargs): @@ -587,6 +640,11 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask): image_location, image_meta, image_service) + if not cloned and 'cinder' in CONF.allowed_direct_url_schemes: + model_update, cloned = self._clone_image_volume(context, + volume_ref, + image_location, + image_meta) if not cloned: # TODO(harlowja): what needs to be rolled back in the clone if this # volume create fails?? 
Likely this should be a subflow or broken diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index fa5a2d1eecd..cbbe984e3b5 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -966,6 +966,107 @@ class VolumeManager(manager.SchedulerDependentManager): self._notify_about_volume_usage(context, volume, "detach.end") LOG.info(_LI("Detach volume completed successfully."), resource=volume) + def _clone_image_volume(self, ctx, volume, image_meta): + volume_type_id = volume.get('volume_type_id') + reserve_opts = {'volumes': 1, 'gigabytes': volume.size} + QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id) + reservations = QUOTAS.reserve(ctx, **reserve_opts) + + try: + new_vol_values = {} + for k, v in volume.items(): + new_vol_values[k] = v + del new_vol_values['id'] + del new_vol_values['_name_id'] + del new_vol_values['volume_type'] + new_vol_values['volume_type_id'] = volume_type_id + new_vol_values['attach_status'] = 'detached' + new_vol_values['volume_attachment'] = [] + new_vol_values['status'] = 'creating' + new_vol_values['project_id'] = ctx.project_id + new_vol_values['display_name'] = 'image-%s' % image_meta['id'] + new_vol_values['source_volid'] = volume.id + LOG.debug('Creating image volume entry: %s.', new_vol_values) + image_volume = self.db.volume_create(ctx, new_vol_values) + except Exception: + QUOTAS.rollback(ctx, reservations) + return False + + QUOTAS.commit(ctx, reservations, + project_id=new_vol_values['project_id']) + + try: + self.create_volume(ctx, image_volume.id, + allow_reschedule=False) + image_volume = self.db.volume_get(ctx, image_volume.id) + if image_volume.status != 'available': + raise exception.InvalidVolume(_('Volume is not available.')) + + self.db.volume_admin_metadata_update(ctx.elevated(), + image_volume.id, + {'readonly': 'True'}, + False) + return image_volume + except exception.CinderException: + LOG.exception(_LE('Failed to clone volume %(volume_id)s for ' + 'image %(image_id)s.'), + 
{'volume_id': volume.id, + 'image_id': image_meta['id']}) + try: + self.delete_volume(ctx, image_volume) + except exception.CinderException: + LOG.exception(_LE('Could not delete the image volume %(id)s.'), + {'id': volume.id}) + return False + + def _clone_image_volume_and_add_location(self, ctx, volume, image_service, + image_meta): + """Create a cloned volume and register its location to the image.""" + if (image_meta['disk_format'] != 'raw' or + image_meta['container_format'] != 'bare'): + return False + + image_volume_context = ctx + if self.driver.configuration.image_upload_use_internal_tenant: + internal_ctx = context.get_internal_tenant_context() + if internal_ctx: + image_volume_context = internal_ctx + + image_volume = self._clone_image_volume(image_volume_context, + volume, + image_meta) + if not image_volume: + return False + + uri = 'cinder://%s' % image_volume.id + image_registered = None + try: + image_registered = image_service.add_location( + ctx, image_meta['id'], uri, {}) + except (exception.NotAuthorized, exception.Invalid, + exception.NotFound): + LOG.exception(_LE('Failed to register image volume location ' + '%(uri)s.'), {'uri': uri}) + + if not image_registered: + LOG.warning(_LW('Registration of image volume URI %(uri)s ' + 'to image %(image_id)s failed.'), + {'uri': uri, 'image_id': image_meta['id']}) + try: + self.delete_volume(image_volume_context, image_volume) + except exception.CinderException: + LOG.exception(_LE('Could not delete failed image volume ' + '%(id)s.'), {'id': image_volume.id}) + return False + + image_volume_meta = {'glance_image_id': image_meta['id'], + 'image_owner': ctx.project_id} + self.db.volume_metadata_update(image_volume_context, + image_volume.id, + image_volume_meta, + False) + return True + def copy_volume_to_image(self, context, volume_id, image_meta): """Uploads the specified volume to Glance. 
@@ -985,10 +1086,19 @@ class VolumeManager(manager.SchedulerDependentManager): image_service, image_id = \ glance.get_remote_image_service(context, image_meta['id']) - self.driver.copy_volume_to_image(context, volume, image_service, - image_meta) - LOG.debug("Uploaded volume to glance image-id: %(image_id)s.", - resource=volume) + if (self.driver.configuration.image_upload_use_cinder_backend + and self._clone_image_volume_and_add_location( + context, volume, image_service, image_meta)): + LOG.debug("Registered image volume location to glance " + "image-id: %(image_id)s.", + {'image_id': image_meta['id']}, + resource=volume) + else: + self.driver.copy_volume_to_image(context, volume, + image_service, image_meta) + LOG.debug("Uploaded volume to glance image-id: %(image_id)s.", + {'image_id': image_meta['id']}, + resource=volume) except Exception as error: LOG.error(_LE("Upload volume to image encountered an error " "(image-id: %(image_id)s)."),