Add group snapshots - manager
This is the fourth patch that implements the generic-volume-group blueprint. It implements group snapshots and create-group-from-source in the volume manager.

It depends on the third patch, which adds the db and objects support for group snapshots: https://review.openstack.org/#/c/328052/

The next patch adds the group snapshots APIs: https://review.openstack.org/#/c/361369/

Change-Id: I5191a4cd1d82cc3598d71f2460d8239bdf1869c7
Partial-Implements: blueprint generic-volume-group
This commit is contained in:
parent 325f99a64a
commit 4a67bc8218

cinder/tests/unit/group/test_groups_manager.py (new file, 732 lines added)
@@ -0,0 +1,732 @@
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from oslo_config import cfg
from oslo_utils import importutils

from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import test
from cinder.tests.unit import conf_fixture
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import utils as tests_utils
import cinder.volume
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume import utils as volutils

GROUP_QUOTAS = quota.GROUP_QUOTAS
CONF = cfg.CONF


class GroupManagerTestCase(test.TestCase):

    def setUp(self):
        super(GroupManagerTestCase, self).setUp()
        self.volume = importutils.import_object(CONF.volume_manager)
        self.configuration = mock.Mock(conf.Configuration)
        self.context = context.get_admin_context()
        self.context.user_id = fake.USER_ID
        self.project_id = fake.PROJECT3_ID
        self.context.project_id = self.project_id
        self.volume.driver.set_initialized()
        self.volume.stats = {'allocated_capacity_gb': 0,
                             'pools': {}}
        self.volume_api = cinder.volume.api.API()
    def test_delete_volume_in_group(self):
        """Test deleting a volume that's tied to a group fails."""
        volume_api = cinder.volume.api.API()
        volume_params = {'status': 'available',
                         'group_id': fake.GROUP_ID}
        volume = tests_utils.create_volume(self.context, **volume_params)
        self.assertRaises(exception.InvalidVolume,
                          volume_api.delete, self.context, volume)

    @mock.patch.object(GROUP_QUOTAS, "reserve",
                       return_value=["RESERVATION"])
    @mock.patch.object(GROUP_QUOTAS, "commit")
    @mock.patch.object(GROUP_QUOTAS, "rollback")
    @mock.patch.object(driver.VolumeDriver,
                       "delete_group",
                       return_value=({'status': (
                           fields.GroupStatus.DELETED)}, []))
    def test_create_delete_group(self, fake_delete_grp,
                                 fake_rollback,
                                 fake_commit, fake_reserve):
        """Test group can be created and deleted."""

        def fake_driver_create_grp(context, group):
            """Make sure that the pool is part of the host."""
            self.assertIn('host', group)
            host = group.host
            pool = volutils.extract_host(host, level='pool')
            self.assertEqual('fakepool', pool)
            return {'status': fields.GroupStatus.AVAILABLE}

        self.mock_object(self.volume.driver, 'create_group',
                         fake_driver_create_grp)

        group = tests_utils.create_group(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            volume_type_ids=[fake.VOLUME_TYPE_ID],
            host='fakehost@fakedrv#fakepool',
            group_type_id=fake.GROUP_TYPE_ID)
        group = objects.Group.get_by_id(self.context, group.id)
        self.assertEqual(0, len(self.notifier.notifications),
                         self.notifier.notifications)
        self.volume.create_group(self.context, group)
        self.assertEqual(2, len(self.notifier.notifications),
                         self.notifier.notifications)
        msg = self.notifier.notifications[0]
        self.assertEqual('group.create.start', msg['event_type'])
        expected = {
            'status': fields.GroupStatus.AVAILABLE,
            'name': 'test_group',
            'availability_zone': 'nova',
            'tenant_id': self.context.project_id,
            'created_at': 'DONTCARE',
            'user_id': fake.USER_ID,
            'group_id': group.id,
            'group_type': fake.GROUP_TYPE_ID
        }
        self.assertDictMatch(expected, msg['payload'])
        msg = self.notifier.notifications[1]
        self.assertEqual('group.create.end', msg['event_type'])
        self.assertDictMatch(expected, msg['payload'])
        self.assertEqual(
            group.id,
            objects.Group.get_by_id(context.get_admin_context(),
                                    group.id).id)

        self.volume.delete_group(self.context, group)
        grp = objects.Group.get_by_id(
            context.get_admin_context(read_deleted='yes'), group.id)
        self.assertEqual(fields.GroupStatus.DELETED, grp.status)
        self.assertEqual(4, len(self.notifier.notifications),
                         self.notifier.notifications)
        msg = self.notifier.notifications[2]
        self.assertEqual('group.delete.start', msg['event_type'])
        self.assertDictMatch(expected, msg['payload'])
        msg = self.notifier.notifications[3]
        self.assertEqual('group.delete.end', msg['event_type'])
        expected['status'] = fields.GroupStatus.DELETED
        self.assertDictMatch(expected, msg['payload'])
        self.assertRaises(exception.NotFound,
                          objects.Group.get_by_id,
                          self.context,
                          group.id)
    @mock.patch.object(GROUP_QUOTAS, "reserve",
                       return_value=["RESERVATION"])
    @mock.patch.object(GROUP_QUOTAS, "commit")
    @mock.patch.object(GROUP_QUOTAS, "rollback")
    @mock.patch.object(driver.VolumeDriver,
                       "create_group",
                       return_value={'status': 'available'})
    @mock.patch.object(driver.VolumeDriver,
                       "update_group")
    def test_update_group(self, fake_update_grp,
                          fake_create_grp, fake_rollback,
                          fake_commit, fake_reserve):
        """Test group can be updated."""
        group = tests_utils.create_group(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            volume_type_ids=[fake.VOLUME_TYPE_ID],
            group_type_id=fake.GROUP_TYPE_ID,
            host=CONF.host)
        self.volume.create_group(self.context, group)

        volume = tests_utils.create_volume(
            self.context,
            group_id=group.id,
            volume_type_id=fake.VOLUME_TYPE_ID,
            status='available',
            host=group.host)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)

        volume2 = tests_utils.create_volume(
            self.context,
            group_id=None,
            volume_type_id=fake.VOLUME_TYPE_ID,
            status='available',
            host=group.host)
        volume_id2 = volume2['id']
        self.volume.create_volume(self.context, volume_id2)

        fake_update_grp.return_value = (
            {'status': fields.GroupStatus.AVAILABLE},
            [{'id': volume_id2, 'status': 'available'}],
            [{'id': volume_id, 'status': 'available'}])

        self.volume.update_group(self.context, group,
                                 add_volumes=volume_id2,
                                 remove_volumes=volume_id)
        grp = objects.Group.get_by_id(self.context, group.id)
        expected = {
            'status': fields.GroupStatus.AVAILABLE,
            'name': 'test_group',
            'availability_zone': 'nova',
            'tenant_id': self.context.project_id,
            'created_at': 'DONTCARE',
            'user_id': fake.USER_ID,
            'group_id': group.id,
            'group_type': fake.GROUP_TYPE_ID
        }
        self.assertEqual(fields.GroupStatus.AVAILABLE, grp.status)
        self.assertEqual(10, len(self.notifier.notifications),
                         self.notifier.notifications)
        msg = self.notifier.notifications[6]
        self.assertEqual('group.update.start', msg['event_type'])
        self.assertDictMatch(expected, msg['payload'])
        msg = self.notifier.notifications[8]
        self.assertEqual('group.update.end', msg['event_type'])
        self.assertDictMatch(expected, msg['payload'])
        grpvolumes = db.volume_get_all_by_generic_group(self.context, group.id)
        grpvol_ids = [grpvol['id'] for grpvol in grpvolumes]
        # Verify volume is removed.
        self.assertNotIn(volume_id, grpvol_ids)
        # Verify volume is added.
        self.assertIn(volume_id2, grpvol_ids)

        volume3 = tests_utils.create_volume(
            self.context,
            group_id=None,
            host=group.host,
            volume_type_id=fake.VOLUME_TYPE_ID,
            status='wrong-status')
        volume_id3 = volume3['id']

        volume_get_orig = self.volume.db.volume_get
        self.volume.db.volume_get = mock.Mock(
            return_value={'status': 'wrong_status',
                          'id': volume_id3})
        # Try to add a volume in wrong status
        self.assertRaises(exception.InvalidVolume,
                          self.volume.update_group,
                          self.context,
                          group,
                          add_volumes=volume_id3,
                          remove_volumes=None)
        self.volume.db.volume_get.reset_mock()
        self.volume.db.volume_get = volume_get_orig
    @mock.patch.object(driver.VolumeDriver,
                       "create_group",
                       return_value={'status': 'available'})
    @mock.patch.object(driver.VolumeDriver,
                       "delete_group",
                       return_value=({'status': 'deleted'}, []))
    @mock.patch.object(driver.VolumeDriver,
                       "create_group_snapshot",
                       return_value={'status': 'available'})
    @mock.patch.object(driver.VolumeDriver,
                       "delete_group_snapshot",
                       return_value=({'status': 'deleted'}, []))
    @mock.patch.object(driver.VolumeDriver,
                       "create_group_from_src",
                       return_value=(None, None))
    @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
                'create_volume_from_snapshot')
    @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
                'create_cloned_volume')
    def test_create_group_from_src(self,
                                   mock_create_cloned_vol,
                                   mock_create_vol_from_snap,
                                   mock_create_from_src,
                                   mock_delete_grpsnap,
                                   mock_create_grpsnap,
                                   mock_delete_grp,
                                   mock_create_grp):
        """Test group can be created from a group snapshot or a group."""
        group = tests_utils.create_group(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            status=fields.GroupStatus.AVAILABLE,
            volume_type_ids=[fake.VOLUME_TYPE_ID],
            group_type_id=fake.GROUP_TYPE_ID,
            host=CONF.host)
        volume = tests_utils.create_volume(
            self.context,
            group_id=group.id,
            status='available',
            host=group.host,
            volume_type_id=fake.VOLUME_TYPE_ID,
            size=1)
        volume_id = volume['id']
        group_snapshot_returns = self._create_group_snapshot(group.id,
                                                             [volume_id])
        group_snapshot = group_snapshot_returns[0]
        snapshot_id = group_snapshot_returns[1][0]['id']

        # Create group from source group snapshot.
        group2 = tests_utils.create_group(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            group_snapshot_id=group_snapshot.id,
            volume_type_ids=[fake.VOLUME_TYPE_ID],
            group_type_id=fake.GROUP_TYPE_ID,
            host=CONF.host)
        group2 = objects.Group.get_by_id(self.context, group2.id)
        volume2 = tests_utils.create_volume(
            self.context,
            group_id=group2.id,
            snapshot_id=snapshot_id,
            status='available',
            host=group2.host,
            volume_type_id=fake.VOLUME_TYPE_ID)
        self.volume.create_volume(self.context, volume2.id, volume=volume2)
        self.volume.create_group_from_src(
            self.context, group2, group_snapshot=group_snapshot)
        grp2 = objects.Group.get_by_id(self.context, group2.id)
        expected = {
            'status': fields.GroupStatus.AVAILABLE,
            'name': 'test_group',
            'availability_zone': 'nova',
            'tenant_id': self.context.project_id,
            'created_at': 'DONTCARE',
            'user_id': fake.USER_ID,
            'group_id': group2.id,
            'group_type': fake.GROUP_TYPE_ID,
        }
        self.assertEqual(fields.GroupStatus.AVAILABLE, grp2.status)
        self.assertEqual(group2.id, grp2['id'])
        self.assertEqual(group_snapshot.id, grp2['group_snapshot_id'])
        self.assertIsNone(grp2['source_group_id'])

        msg = self.notifier.notifications[2]
        self.assertEqual('group.create.start', msg['event_type'])
        self.assertDictMatch(expected, msg['payload'])
        msg = self.notifier.notifications[4]
        self.assertEqual('group.create.end', msg['event_type'])
        self.assertDictMatch(expected, msg['payload'])

        if len(self.notifier.notifications) > 6:
            self.assertFalse(self.notifier.notifications[6],
                             self.notifier.notifications)
        self.assertEqual(6, len(self.notifier.notifications),
                         self.notifier.notifications)

        self.volume.delete_group(self.context, group2)

        if len(self.notifier.notifications) > 9:
            self.assertFalse(self.notifier.notifications[10],
                             self.notifier.notifications)
        self.assertEqual(9, len(self.notifier.notifications),
                         self.notifier.notifications)

        msg = self.notifier.notifications[6]
        self.assertEqual('group.delete.start', msg['event_type'])
        expected['status'] = fields.GroupStatus.AVAILABLE
        self.assertDictMatch(expected, msg['payload'])
        msg = self.notifier.notifications[8]
        self.assertEqual('group.delete.end', msg['event_type'])
        expected['status'] = fields.GroupStatus.DELETED
        self.assertDictMatch(expected, msg['payload'])

        grp2 = objects.Group.get_by_id(
            context.get_admin_context(read_deleted='yes'), group2.id)
        self.assertEqual(fields.GroupStatus.DELETED, grp2.status)
        self.assertRaises(exception.NotFound,
                          objects.Group.get_by_id,
                          self.context,
                          group2.id)

        # Create group from source group
        group3 = tests_utils.create_group(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            source_group_id=group.id,
            volume_type_ids=[fake.VOLUME_TYPE_ID],
            group_type_id=fake.GROUP_TYPE_ID,
            host=CONF.host)
        volume3 = tests_utils.create_volume(
            self.context,
            group_id=group3.id,
            source_volid=volume_id,
            status='available',
            host=group3.host,
            volume_type_id=fake.VOLUME_TYPE_ID)
        self.volume.create_volume(self.context, volume3.id, volume=volume3)
        self.volume.create_group_from_src(
            self.context, group3, source_group=group)

        grp3 = objects.Group.get_by_id(self.context, group3.id)

        self.assertEqual(fields.GroupStatus.AVAILABLE, grp3.status)
        self.assertEqual(group3.id, grp3.id)
        self.assertEqual(group.id, grp3.source_group_id)
        self.assertIsNone(grp3.group_snapshot_id)

        self.volume.delete_group_snapshot(self.context, group_snapshot)
        self.volume.delete_group(self.context, group)
    def test_sort_snapshots(self):
        vol1 = {'id': fake.VOLUME_ID, 'name': 'volume 1',
                'snapshot_id': fake.SNAPSHOT_ID,
                'group_id': fake.GROUP_ID}
        vol2 = {'id': fake.VOLUME2_ID, 'name': 'volume 2',
                'snapshot_id': fake.SNAPSHOT2_ID,
                'group_id': fake.GROUP_ID}
        vol3 = {'id': fake.VOLUME3_ID, 'name': 'volume 3',
                'snapshot_id': fake.SNAPSHOT3_ID,
                'group_id': fake.GROUP_ID}
        snp1 = {'id': fake.SNAPSHOT_ID, 'name': 'snap 1',
                'group_snapshot_id': fake.GROUP_ID}
        snp2 = {'id': fake.SNAPSHOT2_ID, 'name': 'snap 2',
                'group_snapshot_id': fake.GROUP_ID}
        snp3 = {'id': fake.SNAPSHOT3_ID, 'name': 'snap 3',
                'group_snapshot_id': fake.GROUP_ID}
        snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1)
        snp2_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp2)
        snp3_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp3)
        volumes = []
        snapshots = []
        volumes.append(vol1)
        volumes.append(vol2)
        volumes.append(vol3)
        snapshots.append(snp2_obj)
        snapshots.append(snp3_obj)
        snapshots.append(snp1_obj)
        i = 0
        for vol in volumes:
            snap = snapshots[i]
            i += 1
            self.assertNotEqual(vol['snapshot_id'], snap.id)
        sorted_snaps = self.volume._sort_snapshots(volumes, snapshots)
        i = 0
        for vol in volumes:
            snap = sorted_snaps[i]
            i += 1
            self.assertEqual(vol['snapshot_id'], snap.id)

        snapshots[2]['id'] = fake.WILL_NOT_BE_FOUND_ID
        self.assertRaises(exception.SnapshotNotFound,
                          self.volume._sort_snapshots,
                          volumes, snapshots)

        self.assertRaises(exception.InvalidInput,
                          self.volume._sort_snapshots,
                          volumes, [])
    def test_sort_source_vols(self):
        vol1 = {'id': '1', 'name': 'volume 1',
                'source_volid': '1',
                'group_id': '2'}
        vol2 = {'id': '2', 'name': 'volume 2',
                'source_volid': '2',
                'group_id': '2'}
        vol3 = {'id': '3', 'name': 'volume 3',
                'source_volid': '3',
                'group_id': '2'}
        src_vol1 = {'id': '1', 'name': 'source vol 1',
                    'group_id': '1'}
        src_vol2 = {'id': '2', 'name': 'source vol 2',
                    'group_id': '1'}
        src_vol3 = {'id': '3', 'name': 'source vol 3',
                    'group_id': '1'}
        volumes = []
        src_vols = []
        volumes.append(vol1)
        volumes.append(vol2)
        volumes.append(vol3)
        src_vols.append(src_vol2)
        src_vols.append(src_vol3)
        src_vols.append(src_vol1)
        i = 0
        for vol in volumes:
            src_vol = src_vols[i]
            i += 1
            self.assertNotEqual(vol['source_volid'], src_vol['id'])
        sorted_src_vols = self.volume._sort_source_vols(volumes, src_vols)
        i = 0
        for vol in volumes:
            src_vol = sorted_src_vols[i]
            i += 1
            self.assertEqual(vol['source_volid'], src_vol['id'])

        src_vols[2]['id'] = '9999'
        self.assertRaises(exception.VolumeNotFound,
                          self.volume._sort_source_vols,
                          volumes, src_vols)

        self.assertRaises(exception.InvalidInput,
                          self.volume._sort_source_vols,
                          volumes, [])
    def _create_group_snapshot(self, group_id, volume_ids, size='0'):
        """Create a group_snapshot object."""
        grpsnap = objects.GroupSnapshot(self.context)
        grpsnap.user_id = fake.USER_ID
        grpsnap.project_id = fake.PROJECT_ID
        grpsnap.group_id = group_id
        grpsnap.status = fields.GroupStatus.CREATING
        grpsnap.create()

        # Create snapshot list
        snaps = []
        for volume_id in volume_ids:
            snap = objects.Snapshot(context.get_admin_context())
            snap.volume_size = size
            snap.user_id = fake.USER_ID
            snap.project_id = fake.PROJECT_ID
            snap.volume_id = volume_id
            snap.status = fields.SnapshotStatus.AVAILABLE
            snap.group_snapshot_id = grpsnap.id
            snap.create()
            snaps.append(snap)

        return grpsnap, snaps
    @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
    @mock.patch('cinder.volume.driver.VolumeDriver.create_group',
                autospec=True,
                return_value={'status': 'available'})
    @mock.patch('cinder.volume.driver.VolumeDriver.delete_group',
                autospec=True,
                return_value=({'status': 'deleted'}, []))
    @mock.patch('cinder.volume.driver.VolumeDriver.create_group_snapshot',
                autospec=True,
                return_value=({'status': 'available'}, []))
    @mock.patch('cinder.volume.driver.VolumeDriver.delete_group_snapshot',
                autospec=True,
                return_value=({'status': 'deleted'}, []))
    def test_create_delete_group_snapshot(self,
                                          mock_del_grpsnap,
                                          mock_create_grpsnap,
                                          mock_del_grp,
                                          _mock_create_grp,
                                          mock_notify):
        """Test group_snapshot can be created and deleted."""
        group = tests_utils.create_group(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            volume_type_ids=[fake.VOLUME_TYPE_ID],
            group_type_id=fake.GROUP_TYPE_ID,
            host=CONF.host)
        volume = tests_utils.create_volume(
            self.context,
            group_id=group.id,
            host=group.host,
            volume_type_id=fake.VOLUME_TYPE_ID)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)

        self.assert_notify_called(mock_notify,
                                  (['INFO', 'volume.create.start'],
                                   ['INFO', 'volume.create.end']))

        group_snapshot_returns = self._create_group_snapshot(group.id,
                                                             [volume_id])
        group_snapshot = group_snapshot_returns[0]
        self.volume.create_group_snapshot(self.context, group_snapshot)
        self.assertEqual(group_snapshot.id,
                         objects.GroupSnapshot.get_by_id(
                             context.get_admin_context(),
                             group_snapshot.id).id)

        self.assert_notify_called(mock_notify,
                                  (['INFO', 'volume.create.start'],
                                   ['INFO', 'volume.create.end'],
                                   ['INFO', 'group_snapshot.create.start'],
                                   ['INFO', 'snapshot.create.start'],
                                   ['INFO', 'group_snapshot.create.end'],
                                   ['INFO', 'snapshot.create.end']))

        self.volume.delete_group_snapshot(self.context, group_snapshot)

        self.assert_notify_called(mock_notify,
                                  (['INFO', 'volume.create.start'],
                                   ['INFO', 'volume.create.end'],
                                   ['INFO', 'group_snapshot.create.start'],
                                   ['INFO', 'snapshot.create.start'],
                                   ['INFO', 'group_snapshot.create.end'],
                                   ['INFO', 'snapshot.create.end'],
                                   ['INFO', 'group_snapshot.delete.start'],
                                   ['INFO', 'snapshot.delete.start'],
                                   ['INFO', 'group_snapshot.delete.end'],
                                   ['INFO', 'snapshot.delete.end']))

        grpsnap = objects.GroupSnapshot.get_by_id(
            context.get_admin_context(read_deleted='yes'),
            group_snapshot.id)
        self.assertEqual('deleted', grpsnap.status)
        self.assertRaises(exception.NotFound,
                          objects.GroupSnapshot.get_by_id,
                          self.context,
                          group_snapshot.id)

        self.volume.delete_group(self.context, group)

        self.assertTrue(mock_create_grpsnap.called)
        self.assertTrue(mock_del_grpsnap.called)
        self.assertTrue(mock_del_grp.called)
    @mock.patch('cinder.volume.driver.VolumeDriver.create_group',
                return_value={'status': 'available'})
    @mock.patch('cinder.volume.driver.VolumeDriver.delete_group',
                return_value=({'status': 'deleted'}, []))
    def test_delete_group_correct_host(self,
                                       mock_del_grp,
                                       _mock_create_grp):
        """Test group can be deleted.

        Test group can be deleted when volumes are on
        the correct volume node.
        """
        group = tests_utils.create_group(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            volume_type_ids=[fake.VOLUME_TYPE_ID],
            group_type_id=fake.GROUP_TYPE_ID)
        volume = tests_utils.create_volume(
            self.context,
            group_id=group.id,
            host='host1@backend1#pool1',
            status='creating',
            volume_type_id=fake.VOLUME_TYPE_ID,
            size=1)
        self.volume.host = 'host1@backend1'
        self.volume.create_volume(self.context, volume.id, volume=volume)

        self.volume.delete_group(self.context, group)
        grp = objects.Group.get_by_id(
            context.get_admin_context(read_deleted='yes'),
            group.id)
        self.assertEqual(fields.GroupStatus.DELETED, grp.status)
        self.assertRaises(exception.NotFound,
                          objects.Group.get_by_id,
                          self.context,
                          group.id)

        self.assertTrue(mock_del_grp.called)
    @mock.patch('cinder.volume.driver.VolumeDriver.create_group',
                return_value={'status': 'available'})
    def test_delete_group_wrong_host(self, *_mock_create_grp):
        """Test group cannot be deleted.

        Test group cannot be deleted when volumes in the
        group are not local to the volume node.
        """
        group = tests_utils.create_group(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            volume_type_ids=[fake.VOLUME_TYPE_ID],
            group_type_id=fake.GROUP_TYPE_ID)
        volume = tests_utils.create_volume(
            self.context,
            group_id=group.id,
            host='host1@backend1#pool1',
            status='creating',
            volume_type_id=fake.VOLUME_TYPE_ID,
            size=1)
        self.volume.host = 'host1@backend2'
        self.volume.create_volume(self.context, volume.id, volume=volume)

        self.assertRaises(exception.InvalidVolume,
                          self.volume.delete_group,
                          self.context,
                          group)
        grp = objects.Group.get_by_id(self.context, group.id)
        # Group is not deleted
        self.assertEqual(fields.GroupStatus.AVAILABLE, grp.status)
    def test_create_volume_with_group_invalid_type(self):
        """Test volume creation with group & invalid volume type."""
        vol_type = db.volume_type_create(
            context.get_admin_context(),
            dict(name=conf_fixture.def_vol_type, extra_specs={})
        )
        db_vol_type = db.volume_type_get(context.get_admin_context(),
                                         vol_type.id)

        grp = tests_utils.create_group(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            status=fields.GroupStatus.AVAILABLE,
            volume_type_ids=[db_vol_type['id']],
            group_type_id=fake.GROUP_TYPE_ID,
            host=CONF.host)

        fake_type = {
            'id': '9999',
            'name': 'fake',
        }
        vol_api = cinder.volume.api.API()

        # Volume type must be provided when creating a volume in a
        # group.
        self.assertRaises(exception.InvalidInput,
                          vol_api.create,
                          self.context, 1, 'vol1', 'volume 1',
                          group=grp)

        # Volume type must be valid.
        self.assertRaises(exception.InvalidInput,
                          vol_api.create,
                          self.context, 1, 'vol1', 'volume 1',
                          volume_type=fake_type,
                          group=grp)
    @mock.patch('cinder.volume.driver.VolumeDriver.create_group_snapshot',
                autospec=True,
                return_value=({'status': 'available'}, []))
    def test_create_group_snapshot_with_bootable_volumes(self,
                                                         mock_create_grpsnap):
        """Test group_snapshot can be created with bootable volumes."""
        group = tests_utils.create_group(
            self.context,
            availability_zone=CONF.storage_availability_zone,
            volume_type_ids=[fake.VOLUME_TYPE_ID],
            group_type_id=fake.GROUP_TYPE_ID,
            host=CONF.host)
        volume = tests_utils.create_volume(
            self.context,
            group_id=group.id,
            host=group.host,
            volume_type_id=fake.VOLUME_TYPE_ID)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume_id)
        # Create a bootable volume
        bootable_vol_params = {'status': 'creating', 'host': CONF.host,
                               'size': 1, 'bootable': True}
        bootable_vol = tests_utils.create_volume(self.context,
                                                 group_id=group.id,
                                                 **bootable_vol_params)
        bootable_vol_id = bootable_vol['id']
        self.volume.create_volume(self.context, bootable_vol_id)

        volume_ids = [volume_id, bootable_vol_id]
        group_snapshot_returns = self._create_group_snapshot(group.id,
                                                             volume_ids)
        group_snapshot = group_snapshot_returns[0]
        self.volume.create_group_snapshot(self.context, group_snapshot)
        self.assertEqual(group_snapshot.id,
                         objects.GroupSnapshot.get_by_id(
                             context.get_admin_context(),
                             group_snapshot.id).id)
        self.assertTrue(mock_create_grpsnap.called)
cinder/volume/driver.py
@@ -1773,6 +1773,123 @@ class BaseVD(object):
        """
        raise NotImplementedError()

    def create_group_from_src(self, context, group, volumes,
                              group_snapshot=None, snapshots=None,
                              source_group=None, source_vols=None):
        """Creates a group from source.

        :param context: the context of the caller.
        :param group: the Group object to be created.
        :param volumes: a list of Volume objects in the group.
        :param group_snapshot: the GroupSnapshot object as source.
        :param snapshots: a list of snapshot objects in group_snapshot.
        :param source_group: the Group object as source.
        :param source_vols: a list of volume objects in the source_group.
        :returns: model_update, volumes_model_update

        The source can be group_snapshot or a source_group.

        param volumes is retrieved directly from the db. It is a list of
        cinder.db.sqlalchemy.models.Volume to be precise. It cannot be
        assigned to volumes_model_update. volumes_model_update is a list of
        dictionaries. It has to be built by the driver. An entry will be
        in this format: {'id': xxx, 'status': xxx, ......}. model_update
        will be in this format: {'status': xxx, ......}.

        To be consistent with other volume operations, the manager will
        assume the operation is successful if no exception is thrown by
        the driver. For a successful operation, the driver can either build
        the model_update and volumes_model_update and return them or
        return None, None.
        """
        raise NotImplementedError()
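
For illustration only (not part of this patch): a minimal sketch of how a backend driver might satisfy this contract. The FakeGroupDriver class and its _clone_from_snapshot/_clone_from_volume helpers are hypothetical; only the argument and return shapes come from the interface above.

class FakeGroupDriver(BaseVD):
    def create_group_from_src(self, context, group, volumes,
                              group_snapshot=None, snapshots=None,
                              source_group=None, source_vols=None):
        volumes_model_update = []
        for vol in volumes:
            if group_snapshot:
                # Pair each new volume with its source snapshot.
                snap = next(s for s in snapshots
                            if s.id == vol.snapshot_id)
                self._clone_from_snapshot(vol, snap)  # hypothetical helper
            else:
                # Pair each new volume with its source volume.
                src = next(s for s in source_vols
                           if s.id == vol.source_volid)
                self._clone_from_volume(vol, src)  # hypothetical helper
            # Raise on failure; the manager treats a clean return as success.
            volumes_model_update.append({'id': vol.id,
                                         'status': 'available'})
        return {'status': 'available'}, volumes_model_update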

    def create_group_snapshot(self, context, group_snapshot, snapshots):
        """Creates a group_snapshot.

        :param context: the context of the caller.
        :param group_snapshot: the GroupSnapshot object to be created.
        :param snapshots: a list of Snapshot objects in the group_snapshot.
        :returns: model_update, snapshots_model_update

        param snapshots is a list of Snapshot objects. It cannot be assigned
        to snapshots_model_update. snapshots_model_update is a list of
        dictionaries. It has to be built by the driver. An entry will be
        in this format: {'id': xxx, 'status': xxx, ......}. model_update
        will be in this format: {'status': xxx, ......}.

        The driver should populate snapshots_model_update and model_update
        and return them.

        The manager will check snapshots_model_update and update the db
        accordingly for each snapshot. If the driver successfully created
        some snapshots but failed to create others, it should set statuses
        of the snapshots accordingly so that the manager can update the db
        correctly.

        If the status in any entry of snapshots_model_update is 'error', the
        status in model_update will be set to the same if it is not already
        'error'.

        If the status in model_update is 'error', the manager will raise an
        exception and the status of group_snapshot will be set to 'error' in
        the db. If snapshots_model_update is not returned by the driver, the
        manager will set the status of every snapshot to 'error' in the
        except block.

        If the driver raises an exception during the operation, it will be
        caught by the try-except block in the manager and the statuses of
        group_snapshot and all snapshots will be set to 'error'.

        For a successful operation, the driver can either build the
        model_update and snapshots_model_update and return them or
        return None, None. The statuses of group_snapshot and all snapshots
        will be set to 'available' at the end of the manager function.
        """
        raise NotImplementedError()
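
For illustration only (not part of this patch), the return shapes described above might look like this when one of two snapshots fails; per the contract, the 'error' entry forces model_update['status'] to 'error' as well (the ids are placeholders):

model_update = {'status': 'error'}
snapshots_model_update = [
    {'id': 'snap-uuid-1', 'status': 'available'},
    {'id': 'snap-uuid-2', 'status': 'error'},
]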

    def delete_group_snapshot(self, context, group_snapshot, snapshots):
        """Deletes a group_snapshot.

        :param context: the context of the caller.
        :param group_snapshot: the GroupSnapshot object to be deleted.
        :param snapshots: a list of snapshot objects in the group_snapshot.
        :returns: model_update, snapshots_model_update

        param snapshots is a list of objects. It cannot be assigned to
        snapshots_model_update. snapshots_model_update is a list of
        dictionaries. It has to be built by the driver. An entry will be
        in this format: {'id': xxx, 'status': xxx, ......}. model_update
        will be in this format: {'status': xxx, ......}.

        The driver should populate snapshots_model_update and model_update
        and return them.

        The manager will check snapshots_model_update and update the db
        accordingly for each snapshot. If the driver successfully deleted
        some snapshots but failed to delete others, it should set statuses
        of the snapshots accordingly so that the manager can update the db
        correctly.

        If the status in any entry of snapshots_model_update is
        'error_deleting' or 'error', the status in model_update will be set
        to the same if it is not already 'error_deleting' or 'error'.

        If the status in model_update is 'error_deleting' or 'error', the
        manager will raise an exception and the status of group_snapshot
        will be set to 'error' in the db. If snapshots_model_update is not
        returned by the driver, the manager will set the status of every
        snapshot to 'error' in the except block.

        If the driver raises an exception during the operation, it will be
        caught by the try-except block in the manager and the statuses of
        group_snapshot and all snapshots will be set to 'error'.

        For a successful operation, the driver can either build the
        model_update and snapshots_model_update and return them or
        return None, None. The statuses of group_snapshot and all snapshots
        will be set to 'deleted' after the manager deletes them from db.
        """
        raise NotImplementedError()
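
Again for illustration only (not part of this patch): a delete where the backend removed one snapshot but failed on the other could be reported as follows; the manager then raises and marks the group_snapshot 'error' in the db (the ids are placeholders):

model_update = {'status': 'error_deleting'}
snapshots_model_update = [
    {'id': 'snap-uuid-1', 'status': 'deleted'},
    {'id': 'snap-uuid-2', 'status': 'error_deleting'},
]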


@six.add_metaclass(abc.ABCMeta)
class LocalVD(object):
cinder/volume/manager.py
@@ -101,7 +101,9 @@ VALID_ADD_VOL_TO_GROUP_STATUS = (
    'available',
    'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',)

volume_manager_opts = [
    cfg.StrOpt('volume_driver',
@@ -2075,6 +2077,25 @@ class VolumeManager(manager.SchedulerDependentManager):
            context, snapshot, event_suffix,
            extra_usage_info=extra_usage_info, host=self.host)

    def _notify_about_group_snapshot_usage(self,
                                           context,
                                           group_snapshot,
                                           event_suffix,
                                           snapshots=None,
                                           extra_usage_info=None):
        vol_utils.notify_about_group_snapshot_usage(
            context, group_snapshot, event_suffix,
            extra_usage_info=extra_usage_info, host=self.host)

        if not snapshots:
            snapshots = objects.SnapshotList.get_all_for_group_snapshot(
                context, group_snapshot.id)
        if snapshots:
            for snapshot in snapshots:
                vol_utils.notify_about_snapshot_usage(
                    context, snapshot, event_suffix,
                    extra_usage_info=extra_usage_info, host=self.host)

    def extend_volume(self, context, volume_id, new_size, reservations,
                      volume=None):
        # FIXME(dulek): Remove this in v3.0 of RPC API.
@@ -2664,6 +2685,181 @@ class VolumeManager(manager.SchedulerDependentManager):
                      'id': group.id})
        return group

    def create_group_from_src(self, context, group,
                              group_snapshot=None, source_group=None):
        """Creates the group from source.

        The source can be a group snapshot or a source group.
        """
        source_name = None
        snapshots = None
        source_vols = None
        try:
            volumes = objects.VolumeList.get_all_by_generic_group(context,
                                                                  group.id)
            if group_snapshot:
                try:
                    # Check if group_snapshot still exists
                    group_snapshot = objects.GroupSnapshot.get_by_id(
                        context, group_snapshot.id)
                except exception.GroupSnapshotNotFound:
                    LOG.error(_LE("Create group "
                                  "from snapshot-%(snap)s failed: "
                                  "SnapshotNotFound."),
                              {'snap': group_snapshot.id},
                              resource={'type': 'group',
                                        'id': group.id})
                    raise

                source_name = _("snapshot-%s") % group_snapshot.id
                snapshots = objects.SnapshotList.get_all_for_group_snapshot(
                    context, group_snapshot.id)
                for snap in snapshots:
                    if (snap.status not in
                            VALID_CREATE_GROUP_SRC_SNAP_STATUS):
                        msg = (_("Cannot create group "
                                 "%(group)s because snapshot %(snap)s is "
                                 "not in a valid state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'snap': snap['id'],
                                'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS})
                        raise exception.InvalidGroup(reason=msg)

            if source_group:
                try:
                    source_group = objects.Group.get_by_id(
                        context, source_group.id)
                except exception.GroupNotFound:
                    LOG.error(_LE("Create group "
                                  "from source group-%(group)s failed: "
                                  "GroupNotFound."),
                              {'group': source_group.id},
                              resource={'type': 'group',
                                        'id': group.id})
                    raise

                source_name = _("group-%s") % source_group.id
                source_vols = objects.VolumeList.get_all_by_generic_group(
                    context, source_group.id)
                for source_vol in source_vols:
                    if (source_vol.status not in
                            VALID_CREATE_GROUP_SRC_GROUP_STATUS):
                        msg = (_("Cannot create group "
                                 "%(group)s because source volume "
                                 "%(source_vol)s is not in a valid "
                                 "state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'source_vol': source_vol.id,
                                'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS})
                        raise exception.InvalidGroup(reason=msg)

            # Sort source snapshots so that they are in the same order as
            # their corresponding target volumes.
            sorted_snapshots = None
            if group_snapshot and snapshots:
                sorted_snapshots = self._sort_snapshots(volumes, snapshots)

            # Sort source volumes so that they are in the same order as
            # their corresponding target volumes.
            sorted_source_vols = None
            if source_group and source_vols:
                sorted_source_vols = self._sort_source_vols(volumes,
                                                            source_vols)

            self._notify_about_group_usage(
                context, group, "create.start")

            utils.require_driver_initialized(self.driver)

            try:
                model_update, volumes_model_update = (
                    self.driver.create_group_from_src(
                        context, group, volumes, group_snapshot,
                        sorted_snapshots, source_group, sorted_source_vols))
            except NotImplementedError:
                model_update, volumes_model_update = (
                    self._create_group_from_src_generic(
                        context, group, volumes, group_snapshot,
                        sorted_snapshots, source_group, sorted_source_vols))

            if volumes_model_update:
                for update in volumes_model_update:
                    self.db.volume_update(context, update['id'], update)

            if model_update:
                group.update(model_update)
                group.save()

        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = 'error'
                group.save()
                LOG.error(_LE("Create group "
                              "from source %(source)s failed."),
                          {'source': source_name},
                          resource={'type': 'group',
                                    'id': group.id})
                # Update volume status to 'error' as well.
                for vol in volumes:
                    vol.status = 'error'
                    vol.save()

        now = timeutils.utcnow()
        status = 'available'
        for vol in volumes:
            update = {'status': status, 'created_at': now}
            self._update_volume_from_src(context, vol, update, group=group)
            self._update_allocated_capacity(vol)

        group.status = status
        group.created_at = now
        group.save()

        self._notify_about_group_usage(
            context, group, "create.end")
        LOG.info(_LI("Create group "
                     "from source-%(source)s completed successfully."),
                 {'source': source_name},
                 resource={'type': 'group',
                           'id': group.id})
        return group
    def _create_group_from_src_generic(self, context, group, volumes,
                                       group_snapshot=None, snapshots=None,
                                       source_group=None, source_vols=None):
        """Creates a group from source.

        :param context: the context of the caller.
        :param group: the Group object to be created.
        :param volumes: a list of volume objects in the group.
        :param group_snapshot: the GroupSnapshot object as source.
        :param snapshots: a list of snapshot objects in group_snapshot.
        :param source_group: the Group object as source.
        :param source_vols: a list of volume objects in the source_group.
        :returns: model_update, volumes_model_update
        """
        for vol in volumes:
            if snapshots:
                # Pair each target volume with its source snapshot.
                for snapshot in snapshots:
                    if vol.snapshot_id == snapshot.id:
                        self.driver.create_volume_from_snapshot(
                            vol, snapshot)
                        break
            if source_vols:
                # Pair each target volume with its source volume.
                for source_vol in source_vols:
                    if vol.source_volid == source_vol.id:
                        self.driver.create_cloned_volume(vol, source_vol)
                        break
        return None, None

    def _sort_snapshots(self, volumes, snapshots):
        # Sort source snapshots so that they are in the same order as their
        # corresponding target volumes. Each source snapshot in the snapshots
@@ -3460,6 +3656,152 @@ class VolumeManager(manager.SchedulerDependentManager):
            context, cgsnapshot, "create.end")
        return cgsnapshot

    def create_group_snapshot(self, context, group_snapshot):
        """Creates the group_snapshot."""
        caller_context = context
        context = context.elevated()

        LOG.info(_LI("GroupSnapshot %s: creating."), group_snapshot.id)

        snapshots = objects.SnapshotList.get_all_for_group_snapshot(
            context, group_snapshot.id)

        self._notify_about_group_snapshot_usage(
            context, group_snapshot, "create.start")

        snapshots_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)

            LOG.debug("Group snapshot %(grp_snap_id)s: creating.",
                      {'grp_snap_id': group_snapshot.id})

            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            group_snapshot.context = caller_context
            for snapshot in snapshots:
                snapshot.context = caller_context

            try:
                model_update, snapshots_model_update = (
                    self.driver.create_group_snapshot(context, group_snapshot,
                                                      snapshots))
            except NotImplementedError:
                model_update, snapshots_model_update = (
                    self._create_group_snapshot_generic(
                        context, group_snapshot, snapshots))

            if snapshots_model_update:
                for snap_model in snapshots_model_update:
                    # Update db for snapshot.
                    # NOTE(xyang): snapshots is a list of snapshot objects.
                    # snapshots_model_update should be a list of dicts.
                    snap_id = snap_model.pop('id')
                    snap_obj = objects.Snapshot.get_by_id(context, snap_id)
                    snap_obj.update(snap_model)
                    snap_obj.save()
                    if (snap_model['status'] in [
                        fields.SnapshotStatus.ERROR_DELETING,
                        fields.SnapshotStatus.ERROR] and
                            model_update['status'] not in
                            ['error_deleting', 'error']):
                        model_update['status'] = snap_model['status']

            if model_update:
                if model_update['status'] == 'error':
                    msg = (_('Error occurred when creating group_snapshot '
                             '%s.') % group_snapshot.id)
                    LOG.error(msg)
                    raise exception.VolumeDriverException(message=msg)

                group_snapshot.update(model_update)
                group_snapshot.save()

        except exception.CinderException:
            with excutils.save_and_reraise_exception():
                group_snapshot.status = 'error'
                group_snapshot.save()
                # Update snapshot status to 'error' if driver returns
                # None for snapshots_model_update.
                if not snapshots_model_update:
                    for snapshot in snapshots:
                        snapshot.status = fields.SnapshotStatus.ERROR
                        snapshot.save()

        for snapshot in snapshots:
            volume_id = snapshot.volume_id
            snapshot_id = snapshot.id
            vol_obj = objects.Volume.get_by_id(context, volume_id)
            if vol_obj.bootable:
                try:
                    self.db.volume_glance_metadata_copy_to_snapshot(
                        context, snapshot_id, volume_id)
                except exception.GlanceMetadataNotFound:
                    # If volume is not created from image, no glance
                    # metadata would be available for that volume in
                    # the volume glance metadata table.
                    pass
                except exception.CinderException as ex:
                    LOG.error(_LE("Failed updating %(snapshot_id)s"
                                  " metadata using the provided volume"
                                  " %(volume_id)s metadata"),
                              {'volume_id': volume_id,
                               'snapshot_id': snapshot_id})
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()
                    raise exception.MetadataCopyFailure(
                        reason=six.text_type(ex))

            snapshot.status = fields.SnapshotStatus.AVAILABLE
            snapshot.progress = '100%'
            snapshot.save()

        group_snapshot.status = 'available'
        group_snapshot.save()

        LOG.info(_LI("group_snapshot %s: created successfully"),
                 group_snapshot.id)
        self._notify_about_group_snapshot_usage(
            context, group_snapshot, "create.end")
        return group_snapshot
    def _create_group_snapshot_generic(self, context, group_snapshot,
                                       snapshots):
        """Creates a group_snapshot."""
        model_update = {'status': 'available'}
        snapshot_model_updates = []
        for snapshot in snapshots:
            snapshot_model_update = {'id': snapshot.id}
            try:
                self.driver.create_snapshot(snapshot)
                snapshot_model_update['status'] = 'available'
            except Exception:
                snapshot_model_update['status'] = 'error'
                model_update['status'] = 'error'
            snapshot_model_updates.append(snapshot_model_update)

        return model_update, snapshot_model_updates

    def _delete_group_snapshot_generic(self, context, group_snapshot,
                                       snapshots):
        """Deletes a group_snapshot."""
        model_update = {'status': group_snapshot.status}
        snapshot_model_updates = []
        for snapshot in snapshots:
            snapshot_model_update = {'id': snapshot.id}
            try:
                self.driver.delete_snapshot(snapshot)
                snapshot_model_update['status'] = 'deleted'
            except exception.SnapshotIsBusy:
                snapshot_model_update['status'] = 'available'
            except Exception:
                snapshot_model_update['status'] = 'error'
                model_update['status'] = 'error'
            snapshot_model_updates.append(snapshot_model_update)

        return model_update, snapshot_model_updates

    def delete_cgsnapshot(self, context, cgsnapshot):
        """Deletes cgsnapshot."""
        caller_context = context
@@ -3568,6 +3910,120 @@ class VolumeManager(manager.SchedulerDependentManager):
        self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
                                            snapshots)

    def delete_group_snapshot(self, context, group_snapshot):
        """Deletes group_snapshot."""
        caller_context = context
        context = context.elevated()
        project_id = group_snapshot.project_id

        LOG.info(_LI("group_snapshot %s: deleting"), group_snapshot.id)

        snapshots = objects.SnapshotList.get_all_for_group_snapshot(
            context, group_snapshot.id)

        self._notify_about_group_snapshot_usage(
            context, group_snapshot, "delete.start")

        snapshots_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)

            LOG.debug("group_snapshot %(grp_snap_id)s: deleting",
                      {'grp_snap_id': group_snapshot.id})

            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            group_snapshot.context = caller_context
            for snapshot in snapshots:
                snapshot.context = caller_context

            try:
                model_update, snapshots_model_update = (
                    self.driver.delete_group_snapshot(context, group_snapshot,
                                                      snapshots))
            except NotImplementedError:
                model_update, snapshots_model_update = (
                    self._delete_group_snapshot_generic(
                        context, group_snapshot, snapshots))

            if snapshots_model_update:
                for snap_model in snapshots_model_update:
                    # NOTE(xyang): snapshots is a list of snapshot objects.
                    # snapshots_model_update should be a list of dicts.
                    snap = next((item for item in snapshots if
                                 item.id == snap_model['id']), None)
                    if snap:
                        snap_model.pop('id')
                        snap.update(snap_model)
                        snap.save()

                        if (snap_model['status'] in
                                [fields.SnapshotStatus.ERROR_DELETING,
                                 fields.SnapshotStatus.ERROR] and
                                model_update['status'] not in
                                ['error_deleting', 'error']):
                            model_update['status'] = snap_model['status']

            if model_update:
                if model_update['status'] in ['error_deleting', 'error']:
                    msg = (_('Error occurred when deleting group_snapshot '
                             '%s.') % group_snapshot.id)
                    LOG.error(msg)
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group_snapshot.update(model_update)
                    group_snapshot.save()

        except exception.CinderException:
            with excutils.save_and_reraise_exception():
                group_snapshot.status = 'error'
                group_snapshot.save()
                # Update snapshot status to 'error' if driver returns
                # None for snapshots_model_update.
                if not snapshots_model_update:
                    for snapshot in snapshots:
                        snapshot.status = fields.SnapshotStatus.ERROR
                        snapshot.save()

        for snapshot in snapshots:
            # Get reservations
            try:
                if CONF.no_snapshot_gb_quota:
                    reserve_opts = {'snapshots': -1}
                else:
                    reserve_opts = {
                        'snapshots': -1,
                        'gigabytes': -snapshot.volume_size,
                    }
                volume_ref = objects.Volume.get_by_id(context,
                                                      snapshot.volume_id)
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume_ref.volume_type_id)
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)

            except Exception:
                reservations = None
                LOG.exception(_LE("Failed to update usages deleting "
                                  "snapshot"))

            self.db.volume_glance_metadata_delete_by_snapshot(context,
                                                              snapshot.id)
            snapshot.destroy()

            # Commit the reservations
            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)

        group_snapshot.destroy()
        LOG.info(_LI("group_snapshot %s: deleted successfully"),
                 group_snapshot.id)
        self._notify_about_group_snapshot_usage(context, group_snapshot,
                                                "delete.end",
                                                snapshots)

    def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
        """Finalize migration process on backend device."""
        model_update = None
cinder/volume/utils.py
@@ -248,6 +248,7 @@ def _usage_from_group(group_ref, **kw):
        user_id=group_ref.user_id,
        availability_zone=group_ref.availability_zone,
        group_id=group_ref.id,
        group_type=group_ref.group_type_id,
        name=group_ref.name,
        created_at=group_ref.created_at.isoformat(),
        status=group_ref.status)
@@ -287,6 +288,21 @@ def _usage_from_cgsnapshot(cgsnapshot, **kw):
    return usage_info


def _usage_from_group_snapshot(group_snapshot, **kw):
    usage_info = dict(
        tenant_id=group_snapshot.project_id,
        user_id=group_snapshot.user_id,
        group_snapshot_id=group_snapshot.id,
        name=group_snapshot.name,
        group_id=group_snapshot.group_id,
        group_type=group_snapshot.group_type_id,
        created_at=group_snapshot.created_at.isoformat(),
        status=group_snapshot.status)

    usage_info.update(kw)
    return usage_info


def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix,
                                  extra_usage_info=None, host=None):
    if not host:
@@ -304,6 +320,23 @@ def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix,
        usage_info)


def notify_about_group_snapshot_usage(context, group_snapshot, event_suffix,
                                      extra_usage_info=None, host=None):
    if not host:
        host = CONF.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_group_snapshot(group_snapshot,
                                            **extra_usage_info)

    rpc.get_notifier("group_snapshot", host).info(
        context,
        'group_snapshot.%s' % event_suffix,
        usage_info)
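
For illustration only (not part of this patch): the manager's _notify_about_group_snapshot_usage helper shown earlier invokes this function as, for example,

notify_about_group_snapshot_usage(context, group_snapshot, "create.end",
                                  extra_usage_info=None, host=self.host)

which emits a 'group_snapshot.create.end' event on the 'group_snapshot' notifier with the payload built by _usage_from_group_snapshot.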


def _calculate_count(size_in_m, blocksize):

    # Check if volume_dd_blocksize is valid