Merge "Add group snapshots - db and objects"

Jenkins 2016-08-30 23:14:27 +00:00 committed by Gerrit Code Review
commit 3861399ec4
15 changed files with 935 additions and 22 deletions

@ -428,6 +428,11 @@ def snapshot_get_all_for_cgsnapshot(context, project_id):
return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id)
def snapshot_get_all_for_group_snapshot(context, project_id):
"""Get all snapshots belonging to a group snapshot."""
return IMPL.snapshot_get_all_for_group_snapshot(context, project_id)
def snapshot_get_all_for_volume(context, volume_id):
"""Get all snapshots for a volume."""
return IMPL.snapshot_get_all_for_volume(context, volume_id)
@ -1314,9 +1319,9 @@ def group_get_all(context, filters=None, marker=None, limit=None,
sort_dirs=sort_dirs)
def group_create(context, values):
def group_create(context, values, group_snapshot_id=None, group_id=None):
"""Create a group from the values dictionary."""
return IMPL.group_create(context, values)
return IMPL.group_create(context, values, group_snapshot_id, group_id)
def group_get_all_by_project(context, project_id, filters=None,
@ -1344,6 +1349,42 @@ def group_destroy(context, group_id):
return IMPL.group_destroy(context, group_id)
def group_has_group_snapshot_filter():
"""Return a filter that checks if a Group has Group Snapshots."""
return IMPL.group_has_group_snapshot_filter()
def group_has_volumes_filter(attached_or_with_snapshots=False):
"""Return a filter to check if a Group has volumes.
When the attached_or_with_snapshots parameter is True, only attached volumes
or volumes with snapshots will be considered.
"""
return IMPL.group_has_volumes_filter(attached_or_with_snapshots)
def group_creating_from_src(group_id=None, group_snapshot_id=None):
"""Return a filter to check if a Group is being used as creation source.
Returned filter is meant to be used in the Conditional Update mechanism and
checks if provided Group ID or Group Snapshot ID is currently being used to
create another Group.
This filter will not include Groups that have used the ID but have already
finished their creation (status is no longer creating).
Filter uses a subquery that allows it to be used on updates to the
groups table.
"""
return IMPL.group_creating_from_src(group_id, group_snapshot_id)
def group_volume_type_mapping_create(context, group_id, volume_type_id):
"""Create a group volume_type mapping entry."""
return IMPL.group_volume_type_mapping_create(context, group_id,
volume_type_id)
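The three filter helpers above are meant to be fed into Cinder's conditional
update mechanism. A minimal caller sketch, assuming the Group versioned object
exposes conditional_update() like other Cinder persistent objects; the
delete-time policy shown here is illustrative and not part of this change:

# Hypothetical delete guard (not in this commit): refuse to delete a group
# that still has group snapshots, still has volumes, or is currently being
# used as the source of another group in 'creating' state.
filters = [~db.group_has_group_snapshot_filter(),
           ~db.group_has_volumes_filter(),
           ~db.group_creating_from_src(group_id=group.id)]
expected = {'status': ('available', 'error')}
updated = group.conditional_update({'status': 'deleting'}, expected, filters)
if not updated:
    raise exception.InvalidGroup(reason='group is busy')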
###################
@ -1393,6 +1434,52 @@ def cgsnapshot_creating_from_src():
###################
def group_snapshot_get(context, group_snapshot_id):
"""Get a group snapshot or raise if it does not exist."""
return IMPL.group_snapshot_get(context, group_snapshot_id)
def group_snapshot_get_all(context, filters=None):
"""Get all group snapshots."""
return IMPL.group_snapshot_get_all(context, filters)
def group_snapshot_create(context, values):
"""Create a group snapshot from the values dictionary."""
return IMPL.group_snapshot_create(context, values)
def group_snapshot_get_all_by_group(context, group_id, filters=None):
"""Get all group snapshots belonging to a group."""
return IMPL.group_snapshot_get_all_by_group(context, group_id, filters)
def group_snapshot_get_all_by_project(context, project_id, filters=None):
"""Get all group snapshots belonging to a project."""
return IMPL.group_snapshot_get_all_by_project(context, project_id, filters)
def group_snapshot_update(context, group_snapshot_id, values):
"""Set the given properties on a group snapshot and update it.
Raises NotFound if group snapshot does not exist.
"""
return IMPL.group_snapshot_update(context, group_snapshot_id, values)
def group_snapshot_destroy(context, group_snapshot_id):
"""Destroy the group snapshot or raise if it does not exist."""
return IMPL.group_snapshot_destroy(context, group_snapshot_id)
def group_snapshot_creating_from_src():
"""Get a filter to check if a grp snapshot is being created from a grp."""
return IMPL.group_snapshot_creating_from_src()
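Taken together these calls mirror the existing cgsnapshot DB API. A rough
lifecycle sketch, assuming ctxt is a RequestContext for the owning project and
group is an existing group row (illustrative only; real callers go through the
group API layer):

values = {'group_id': group['id'],
          'project_id': ctxt.project_id,
          'user_id': ctxt.user_id,
          'status': 'creating'}
group_snapshot = group_snapshot_create(ctxt, values)
group_snapshot_update(ctxt, group_snapshot['id'], {'status': 'available'})
# Member snapshots are reachable through the helper added earlier in this
# module.
members = snapshot_get_all_for_group_snapshot(ctxt, group_snapshot['id'])
group_snapshot_destroy(ctxt, group_snapshot['id'])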
###################
def purge_deleted_rows(context, age_in_days):
"""Purge deleted rows older than given age from cinder tables

@ -2254,6 +2254,8 @@ def volume_has_undeletable_snapshots_filter():
and_(models.Volume.id == models.Snapshot.volume_id,
~models.Snapshot.deleted,
or_(models.Snapshot.cgsnapshot_id != None, # noqa: != None
models.Snapshot.status.notin_(deletable_statuses)),
or_(models.Snapshot.group_snapshot_id != None, # noqa: != None
models.Snapshot.status.notin_(deletable_statuses))))
@ -2721,6 +2723,16 @@ def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id):
all()
@require_context
def snapshot_get_all_for_group_snapshot(context, group_snapshot_id):
return model_query(context, models.Snapshot, read_deleted='no',
project_only=True).\
filter_by(group_snapshot_id=group_snapshot_id).\
options(joinedload('volume')).\
options(joinedload('snapshot_metadata')).\
all()
@require_context
def snapshot_get_all_by_project(context, project_id, filters=None, marker=None,
limit=None, sort_keys=None, sort_dirs=None,
@ -3745,6 +3757,14 @@ def volume_type_get_all_by_group(context, group_id):
return query
def _group_volume_type_mapping_get_all_by_group_volume_type(context, group_id,
volume_type_id):
mappings = _group_volume_type_mapping_query(context).\
filter_by(group_id=group_id).\
filter_by(volume_type_id=volume_type_id).all()
return mappings
@require_admin_context
def volume_type_access_add(context, type_id, project_id):
"""Add given tenant to the volume type access list."""
@ -5282,26 +5302,94 @@ def group_get_all_by_project(context, project_id, filters=None,
@handle_db_data_error
@require_context
def group_create(context, values):
group = models.Group()
def group_create(context, values, group_snapshot_id=None,
source_group_id=None):
group_model = models.Group
values = values.copy()
if not values.get('id'):
values['id'] = six.text_type(uuid.uuid4())
mappings = []
for item in values.get('volume_type_ids') or []:
mapping = models.GroupVolumeTypeMapping()
mapping['volume_type_id'] = item
mapping['group_id'] = values['id']
mappings.append(mapping)
session = get_session()
with session.begin():
if group_snapshot_id:
conditions = [group_model.id == models.GroupSnapshot.group_id,
models.GroupSnapshot.id == group_snapshot_id]
elif source_group_id:
conditions = [group_model.id == source_group_id]
else:
conditions = None
values['volume_types'] = mappings
if conditions:
# We don't want duplicated field values
values.pop('group_type_id', None)
values.pop('availability_zone', None)
values.pop('host', None)
sel = session.query(group_model.group_type_id,
group_model.availability_zone,
group_model.host,
*(bindparam(k, v) for k, v in values.items())
).filter(*conditions)
names = ['group_type_id', 'availability_zone', 'host']
names.extend(values.keys())
insert_stmt = group_model.__table__.insert().from_select(
names, sel)
result = session.execute(insert_stmt)
# If we couldn't insert the row because of the conditions raise
# the right exception
if not result.rowcount:
if source_group_id:
raise exception.GroupNotFound(
group_id=source_group_id)
raise exception.GroupSnapshotNotFound(
group_snapshot_id=group_snapshot_id)
else:
mappings = []
for item in values.get('volume_type_ids') or []:
mapping = models.GroupVolumeTypeMapping()
mapping['volume_type_id'] = item
mapping['group_id'] = values['id']
mappings.append(mapping)
values['volume_types'] = mappings
group = group_model()
group.update(values)
session.add(group)
return _group_get(context, values['id'], session=session)
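The conditional branch above leans on INSERT ... FROM SELECT so the "does the
source exist?" check and the insert happen in a single statement. A standalone
sketch of the same technique against a toy table, using the pre-1.4 SQLAlchemy
API that matches the code above (all table and column names are illustrative):

import uuid

from sqlalchemy import (Column, MetaData, String, Table, bindparam,
                        create_engine, select)

engine = create_engine('sqlite://')
meta = MetaData()
groups = Table('groups', meta,
               Column('id', String(36), primary_key=True),
               Column('status', String(16)),
               Column('host', String(64)))
meta.create_all(engine)

with engine.begin() as conn:
    # Seed a "source" row; the new row copies host from it and is only
    # inserted if the source actually exists (the WHERE condition).
    conn.execute(groups.insert(),
                 {'id': 'src', 'status': 'available', 'host': 'node-1'})
    sel = select([groups.c.host,
                  bindparam('id', str(uuid.uuid4())),
                  bindparam('status', 'creating')]).where(groups.c.id == 'src')
    result = conn.execute(
        groups.insert().from_select(['host', 'id', 'status'], sel))
    # rowcount is 0 when the source row is missing, which is how group_create
    # above decides to raise GroupNotFound / GroupSnapshotNotFound.
    print(result.rowcount)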
@handle_db_data_error
@require_context
def group_volume_type_mapping_create(context, group_id, volume_type_id):
"""Add group volume_type mapping entry."""
# Verify group exists
_group_get(context, group_id)
# Verify volume type exists
_volume_type_get_id_from_volume_type(context, volume_type_id)
existing = _group_volume_type_mapping_get_all_by_group_volume_type(
context, group_id, volume_type_id)
if existing:
raise exception.GroupVolumeTypeMappingExists(
group_id=group_id,
volume_type_id=volume_type_id)
mapping = models.GroupVolumeTypeMapping()
mapping.update({"group_id": group_id,
"volume_type_id": volume_type_id})
session = get_session()
with session.begin():
group.update(values)
session.add(group)
return _group_get(context, values['id'], session=session)
try:
mapping.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.GroupVolumeTypeMappingExists(
group_id=group_id,
volume_type_id=volume_type_id)
return mapping
@handle_db_data_error
@ -5341,6 +5429,46 @@ def group_destroy(context, group_id):
'updated_at': literal_column('updated_at')}))
def group_has_group_snapshot_filter():
return sql.exists().where(and_(
models.GroupSnapshot.group_id == models.Group.id,
~models.GroupSnapshot.deleted))
def group_has_volumes_filter(attached_or_with_snapshots=False):
query = sql.exists().where(
and_(models.Volume.group_id == models.Group.id,
~models.Volume.deleted))
if attached_or_with_snapshots:
query = query.where(or_(
models.Volume.attach_status == 'attached',
sql.exists().where(
and_(models.Volume.id == models.Snapshot.volume_id,
~models.Snapshot.deleted))))
return query
def group_creating_from_src(group_id=None, group_snapshot_id=None):
# NOTE(geguileo): As explained in devref api_conditional_updates we use a
# subquery to trick MySQL into using the same table in the update and the
# where clause.
subq = sql.select([models.Group]).where(
and_(~models.Group.deleted,
models.Group.status == 'creating')).alias('group2')
if group_id:
match_id = subq.c.source_group_id == group_id
elif group_snapshot_id:
match_id = subq.c.group_snapshot_id == group_snapshot_id
else:
msg = _('group_creating_from_src must be called with group_id or '
'group_snapshot_id parameter.')
raise exception.ProgrammingError(reason=msg)
return sql.exists([subq]).where(match_id)
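The NOTE above refers to MySQL's restriction that the table being UPDATEd may
not appear directly in a subquery of its own WHERE clause; wrapping the SELECT
in an alias turns it into a derived table, which MySQL accepts. A standalone
sketch of that trick on a toy table (pre-1.4 SQLAlchemy API, names
illustrative):

from sqlalchemy import Column, MetaData, String, Table, exists, select

meta = MetaData()
groups = Table('groups', meta,
               Column('id', String(36), primary_key=True),
               Column('status', String(16)),
               Column('source_group_id', String(36)))

# Derived table: groups that are still being created.
subq = select([groups]).where(groups.c.status == 'creating').alias('group2')
busy = exists([subq]).where(subq.c.source_group_id == 'some-group-id')
# Only updates the row when no other group is being created from it; because
# the EXISTS reads from the aliased derived table, MySQL allows the statement.
print(groups.update().where(~busy).values(status='deleting'))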
###############################
@ -5497,6 +5625,148 @@ def cgsnapshot_creating_from_src():
###############################
@require_context
def _group_snapshot_get(context, group_snapshot_id, session=None):
result = model_query(context, models.GroupSnapshot, session=session,
project_only=True).\
filter_by(id=group_snapshot_id).\
first()
if not result:
raise exception.GroupSnapshotNotFound(
group_snapshot_id=group_snapshot_id)
return result
@require_context
def group_snapshot_get(context, group_snapshot_id):
return _group_snapshot_get(context, group_snapshot_id)
def _group_snapshot_get_all(context, project_id=None, group_id=None,
filters=None):
query = model_query(context, models.GroupSnapshot)
if filters:
if not is_valid_model_filters(models.GroupSnapshot, filters):
return []
query = query.filter_by(**filters)
if project_id:
query = query.filter_by(project_id=project_id)
if group_id:
query = query.filter_by(group_id=group_id)
return query.all()
@require_admin_context
def group_snapshot_get_all(context, filters=None):
return _group_snapshot_get_all(context, filters=filters)
@require_admin_context
def group_snapshot_get_all_by_group(context, group_id, filters=None):
return _group_snapshot_get_all(context, group_id=group_id, filters=filters)
@require_context
def group_snapshot_get_all_by_project(context, project_id, filters=None):
authorize_project_context(context, project_id)
return _group_snapshot_get_all(context, project_id=project_id,
filters=filters)
@handle_db_data_error
@require_context
def group_snapshot_create(context, values):
if not values.get('id'):
values['id'] = six.text_type(uuid.uuid4())
group_id = values.get('group_id')
session = get_session()
model = models.GroupSnapshot
with session.begin():
if group_id:
# The group must contain at least one volume, and it cannot be in the
# middle of updating its volumes or still being created.
conditions = [
sql.exists().where(and_(
~models.Volume.deleted,
models.Volume.group_id == group_id)),
~models.Group.deleted,
models.Group.id == group_id,
~models.Group.status.in_(('creating', 'updating'))]
# NOTE(geguileo): We build a "fake" from_select clause instead of
# using transaction isolation on the session because we would need
# SERIALIZABLE level and that would have a considerable performance
# penalty.
binds = (bindparam(k, v) for k, v in values.items())
sel = session.query(*binds).filter(*conditions)
insert_stmt = model.__table__.insert().from_select(values.keys(),
sel)
result = session.execute(insert_stmt)
# If we couldn't insert the row because of the conditions raise
# the right exception
if not result.rowcount:
msg = _("Source group cannot be empty or in 'creating' or "
"'updating' state. No group snapshot will be created.")
raise exception.InvalidGroup(reason=msg)
else:
group_snapshot = model()
group_snapshot.update(values)
session.add(group_snapshot)
return _group_snapshot_get(context, values['id'], session=session)
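From the caller's point of view the from_select machinery is invisible except
for its failure mode; a caller-side sketch, with values assumed to match what
the group API layer would pass:

values = {'id': six.text_type(uuid.uuid4()),
          'group_id': group_id,
          'project_id': ctxt.project_id,
          'user_id': ctxt.user_id,
          'status': 'creating'}
try:
    db_group_snapshot = group_snapshot_create(ctxt, values)
except exception.InvalidGroup:
    # The source group was empty, or in 'creating'/'updating' state; no row
    # was inserted.
    raise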
@require_context
@handle_db_data_error
def group_snapshot_update(context, group_snapshot_id, values):
session = get_session()
with session.begin():
result = model_query(context, models.GroupSnapshot,
project_only=True).\
filter_by(id=group_snapshot_id).\
first()
if not result:
raise exception.GroupSnapshotNotFound(
_("No group snapshot with id %s") % group_snapshot_id)
result.update(values)
result.save(session=session)
return result
@require_admin_context
def group_snapshot_destroy(context, group_snapshot_id):
session = get_session()
with session.begin():
updated_values = {'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')}
model_query(context, models.GroupSnapshot, session=session).\
filter_by(id=group_snapshot_id).\
update(updated_values)
del updated_values['updated_at']
return updated_values
def group_snapshot_creating_from_src():
"""Get a filter to check if a grp snapshot is being created from a grp."""
return sql.exists().where(and_(
models.GroupSnapshot.group_id == models.Group.id,
~models.GroupSnapshot.deleted,
models.GroupSnapshot.status == 'creating'))
###############################
@require_admin_context
def purge_deleted_rows(context, age_in_days):
"""Purge deleted rows older than age from cinder tables."""
@ -5913,6 +6183,7 @@ def get_model_for_versioned_object(versioned_object):
'VolumeType': models.VolumeTypes,
'CGSnapshot': models.Cgsnapshot,
'GroupType': models.GroupTypes,
'GroupSnapshot': models.GroupSnapshot,
}
if isinstance(versioned_object, six.string_types):

@ -0,0 +1,63 @@
# Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import ForeignKey, MetaData, String, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
groups = Table('groups', meta, autoload=True)
# New table
group_snapshots = Table(
'group_snapshots', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', String(36), primary_key=True),
Column('group_id', String(36),
ForeignKey('groups.id'),
nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('name', String(length=255)),
Column('description', String(length=255)),
Column('status', String(length=255)),
Column('group_type_id', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_snapshots.create()
# Add group_snapshot_id column to snapshots table
snapshots = Table('snapshots', meta, autoload=True)
group_snapshot_id = Column('group_snapshot_id', String(36),
ForeignKey('group_snapshots.id'))
snapshots.create_column(group_snapshot_id)
snapshots.update().values(group_snapshot_id=None).execute()
# Add group_snapshot_id column to groups table
group_snapshot_id = Column('group_snapshot_id', String(36))
groups.create_column(group_snapshot_id)
# Add source_group_id column to groups table
source_group_id = Column('source_group_id', String(36))
groups.create_column(source_group_id)

@ -186,6 +186,8 @@ class Group(BASE, CinderBase):
description = Column(String(255))
status = Column(String(255))
group_type_id = Column(String(36))
group_snapshot_id = Column(String(36))
source_group_id = Column(String(36))
class Cgsnapshot(BASE, CinderBase):
@ -208,6 +210,27 @@ class Cgsnapshot(BASE, CinderBase):
primaryjoin='Cgsnapshot.consistencygroup_id == ConsistencyGroup.id')
class GroupSnapshot(BASE, CinderBase):
"""Represents a group snapshot."""
__tablename__ = 'group_snapshots'
id = Column(String(36), primary_key=True)
group_id = Column(String(36), nullable=False)
user_id = Column(String(255))
project_id = Column(String(255))
name = Column(String(255))
description = Column(String(255))
status = Column(String(255))
group_type_id = Column(String(36))
group = relationship(
Group,
backref="group_snapshots",
foreign_keys=group_id,
primaryjoin='GroupSnapshot.group_id == Group.id')
class Volume(BASE, CinderBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
@ -640,6 +663,7 @@ class Snapshot(BASE, CinderBase):
volume_id = Column(String(36))
cgsnapshot_id = Column(String(36))
group_snapshot_id = Column(String(36))
status = Column(String(255))
progress = Column(String(255))
volume_size = Column(Integer)
@ -664,6 +688,12 @@ class Snapshot(BASE, CinderBase):
foreign_keys=cgsnapshot_id,
primaryjoin='Snapshot.cgsnapshot_id == Cgsnapshot.id')
group_snapshot = relationship(
GroupSnapshot,
backref="snapshots",
foreign_keys=group_snapshot_id,
primaryjoin='Snapshot.group_snapshot_id == GroupSnapshot.id')
class SnapshotMetadata(BASE, CinderBase):
"""Represents a metadata key/value pair for a snapshot."""

@ -540,6 +540,11 @@ class GroupTypeAccessExists(Duplicate):
"%(project_id)s combination already exists.")
class GroupVolumeTypeMappingExists(Duplicate):
message = _("Group volume type mapping for %(group_id)s / "
"%(volume_type_id)s combination already exists.")
class GroupTypeEncryptionExists(Invalid):
message = _("Group type encryption for type %(type_id)s already exists.")
@ -1062,6 +1067,15 @@ class InvalidCgSnapshot(Invalid):
message = _("Invalid CgSnapshot: %(reason)s")
# GroupSnapshot
class GroupSnapshotNotFound(NotFound):
message = _("GroupSnapshot %(group_snapshot_id)s could not be found.")
class InvalidGroupSnapshot(Invalid):
message = _("Invalid GroupSnapshot: %(reason)s")
# Hitachi Block Storage Driver
class HBSDError(CinderException):
message = _("HBSD error occurs.")

@ -37,3 +37,4 @@ def register_all():
__import__('cinder.objects.volume_type')
__import__('cinder.objects.group_type')
__import__('cinder.objects.group')
__import__('cinder.objects.group_snapshot')

@ -114,6 +114,8 @@ OBJ_VERSIONS.add('1.8', {'RequestSpec': '1.0', 'VolumeProperties': '1.0'})
OBJ_VERSIONS.add('1.9', {'GroupType': '1.0', 'GroupTypeList': '1.0'})
OBJ_VERSIONS.add('1.10', {'Group': '1.0', 'GroupList': '1.0', 'Volume': '1.5',
'RequestSpec': '1.1', 'VolumeProperties': '1.1'})
OBJ_VERSIONS.add('1.11', {'GroupSnapshot': '1.0', 'GroupSnapshotList': '1.0',
'Group': '1.1'})
class CinderObjectRegistry(base.VersionedObjectRegistry):

@ -20,14 +20,16 @@ from cinder.objects import base
from cinder.objects import fields as c_fields
from oslo_versionedobjects import fields
OPTIONAL_FIELDS = ['volumes', 'volume_types']
OPTIONAL_FIELDS = ['volumes', 'volume_types', 'group_snapshots']
@base.CinderObjectRegistry.register
class Group(base.CinderPersistentObject, base.CinderObject,
base.CinderObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
# Version 1.1: Added group_snapshots, group_snapshot_id, and
# source_group_id
VERSION = '1.1'
fields = {
'id': fields.UUIDField(),
@ -41,9 +43,13 @@ class Group(base.CinderPersistentObject, base.CinderObject,
'group_type_id': fields.StringField(),
'volume_type_ids': fields.ListOfStringsField(nullable=True),
'status': c_fields.GroupStatusField(nullable=True),
'group_snapshot_id': fields.UUIDField(nullable=True),
'source_group_id': fields.UUIDField(nullable=True),
'volumes': fields.ObjectField('VolumeList', nullable=True),
'volume_types': fields.ObjectField('VolumeTypeList',
nullable=True),
'group_snapshots': fields.ObjectField('GroupSnapshotList',
nullable=True),
}
@staticmethod
@ -71,11 +77,18 @@ class Group(base.CinderPersistentObject, base.CinderObject,
db_group['volume_types'])
group.volume_types = volume_types
if 'group_snapshots' in expected_attrs:
group_snapshots = base.obj_make_list(
context, objects.GroupSnapshotList(context),
objects.GroupSnapshot,
db_group['group_snapshots'])
group.group_snapshots = group_snapshots
group._context = context
group.obj_reset_changes()
return group
def create(self):
def create(self, group_snapshot_id=None, source_group_id=None):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason=_('already_created'))
@ -90,8 +103,15 @@ class Group(base.CinderPersistentObject, base.CinderObject,
raise exception.ObjectActionError(action='create',
reason=_('volumes assigned'))
if 'group_snapshots' in updates:
raise exception.ObjectActionError(
action='create',
reason=_('group_snapshots assigned'))
db_groups = db.group_create(self._context,
updates)
updates,
group_snapshot_id,
source_group_id)
self._from_db_object(self._context, self, db_groups)
def obj_load_attr(self, attrname):
@ -111,6 +131,10 @@ class Group(base.CinderPersistentObject, base.CinderObject,
self.volumes = objects.VolumeList.get_all_by_generic_group(
self._context, self.id)
if attrname == 'group_snapshots':
self.group_snapshots = objects.GroupSnapshotList.get_all_by_group(
self._context, self.id)
self.obj_reset_changes(fields=[attrname])
def save(self):
@ -125,6 +149,11 @@ class Group(base.CinderPersistentObject, base.CinderObject,
msg = _('Cannot save volumes changes in group object update.')
raise exception.ObjectActionError(
action='save', reason=msg)
if 'group_snapshots' in updates:
msg = _('Cannot save group_snapshots changes in group object '
'update.')
raise exception.ObjectActionError(
action='save', reason=msg)
db.group_update(self._context, self.id, updates)
self.obj_reset_changes()
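A usage sketch for the extended Group object (field values are placeholders;
the group API layer would normally build them and validate the snapshot
first):

group = objects.Group(context=ctxt,
                      user_id=ctxt.user_id,
                      project_id=ctxt.project_id,
                      status='creating',
                      name='restored-group',
                      description='group created from a group snapshot')
# group_type_id, availability_zone and host are copied from the snapshot's
# source group inside db.group_create(); GroupSnapshotNotFound is raised if
# the snapshot no longer exists.
group.create(group_snapshot_id=group_snapshot.id)
group.group_snapshots  # lazy-loads a GroupSnapshotList for this group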

@ -0,0 +1,152 @@
# Copyright 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from oslo_versionedobjects import fields
OPTIONAL_FIELDS = ['group', 'snapshots']
@base.CinderObjectRegistry.register
class GroupSnapshot(base.CinderPersistentObject, base.CinderObject,
base.CinderObjectDictCompat):
VERSION = '1.0'
fields = {
'id': fields.UUIDField(),
'group_id': fields.UUIDField(nullable=False),
'project_id': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
'name': fields.StringField(nullable=True),
'description': fields.StringField(nullable=True),
'status': fields.StringField(nullable=True),
'group_type_id': fields.UUIDField(nullable=True),
'group': fields.ObjectField('Group', nullable=True),
'snapshots': fields.ObjectField('SnapshotList', nullable=True),
}
@staticmethod
def _from_db_object(context, group_snapshot, db_group_snapshots,
expected_attrs=None):
expected_attrs = expected_attrs or []
for name, field in group_snapshot.fields.items():
if name in OPTIONAL_FIELDS:
continue
value = db_group_snapshots.get(name)
setattr(group_snapshot, name, value)
if 'group' in expected_attrs:
group = objects.Group(context)
group._from_db_object(context, group,
db_group_snapshots['group'])
group_snapshot.group = group
if 'snapshots' in expected_attrs:
snapshots = base.obj_make_list(
context, objects.SnapshotList(context),
objects.Snapshot,
db_group_snapshots['snapshots'])
group_snapshot.snapshots = snapshots
group_snapshot._context = context
group_snapshot.obj_reset_changes()
return group_snapshot
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason=_('already_created'))
updates = self.cinder_obj_get_changes()
if 'group' in updates:
raise exception.ObjectActionError(
action='create', reason=_('group assigned'))
db_group_snapshots = db.group_snapshot_create(self._context, updates)
self._from_db_object(self._context, self, db_group_snapshots)
def obj_load_attr(self, attrname):
if attrname not in OPTIONAL_FIELDS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason=_('attribute %s not lazy-loadable') % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
if attrname == 'group':
self.group = objects.Group.get_by_id(
self._context, self.group_id)
if attrname == 'snapshots':
self.snapshots = objects.SnapshotList.get_all_for_group_snapshot(
self._context, self.id)
self.obj_reset_changes(fields=[attrname])
def save(self):
updates = self.cinder_obj_get_changes()
if updates:
if 'group' in updates:
raise exception.ObjectActionError(
action='save', reason=_('group changed'))
if 'snapshots' in updates:
raise exception.ObjectActionError(
action='save', reason=_('snapshots changed'))
db.group_snapshot_update(self._context, self.id, updates)
self.obj_reset_changes()
def destroy(self):
with self.obj_as_admin():
updated_values = db.group_snapshot_destroy(self._context, self.id)
self.update(updated_values)
self.obj_reset_changes(updated_values.keys())
@base.CinderObjectRegistry.register
class GroupSnapshotList(base.ObjectListBase, base.CinderObject):
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('GroupSnapshot')
}
child_version = {
'1.0': '1.0'
}
@classmethod
def get_all(cls, context, filters=None):
group_snapshots = db.group_snapshot_get_all(context, filters)
return base.obj_make_list(context, cls(context), objects.GroupSnapshot,
group_snapshots)
@classmethod
def get_all_by_project(cls, context, project_id, filters=None):
group_snapshots = db.group_snapshot_get_all_by_project(context,
project_id,
filters)
return base.obj_make_list(context, cls(context), objects.GroupSnapshot,
group_snapshots)
@classmethod
def get_all_by_group(cls, context, group_id, filters=None):
group_snapshots = db.group_snapshot_get_all_by_group(context, group_id,
filters)
return base.obj_make_list(context, cls(context),
objects.GroupSnapshot,
group_snapshots)
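A usage sketch for the new versioned object (values are placeholders):

group_snapshot = objects.GroupSnapshot(context=ctxt,
                                       group_id=group.id,
                                       group_type_id=group.group_type_id,
                                       user_id=ctxt.user_id,
                                       project_id=ctxt.project_id,
                                       name='nightly',
                                       description='nightly group snapshot',
                                       status='creating')
group_snapshot.create()
group_snapshot.status = 'available'
group_snapshot.save()
group_snapshot.snapshots  # lazy-loads the member SnapshotList
group_snapshot.group      # lazy-loads the parent Group
group_snapshot.destroy()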

@ -36,7 +36,7 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject,
# NOTE(thangp): OPTIONAL_FIELDS are fields that would be lazy-loaded. They
# are typically the relationship in the sqlalchemy object.
OPTIONAL_FIELDS = ('volume', 'metadata', 'cgsnapshot')
OPTIONAL_FIELDS = ('volume', 'metadata', 'cgsnapshot', 'group_snapshot')
fields = {
'id': fields.UUIDField(),
@ -46,6 +46,7 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject,
'volume_id': fields.UUIDField(nullable=True),
'cgsnapshot_id': fields.UUIDField(nullable=True),
'group_snapshot_id': fields.UUIDField(nullable=True),
'status': c_fields.SnapshotStatusField(nullable=True),
'progress': fields.StringField(nullable=True),
'volume_size': fields.IntegerField(nullable=True),
@ -63,6 +64,7 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject,
'volume': fields.ObjectField('Volume', nullable=True),
'cgsnapshot': fields.ObjectField('CGSnapshot', nullable=True),
'group_snapshot': fields.ObjectField('GroupSnapshot', nullable=True),
}
@property
@ -133,6 +135,12 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject,
cgsnapshot._from_db_object(context, cgsnapshot,
db_snapshot['cgsnapshot'])
snapshot.cgsnapshot = cgsnapshot
if 'group_snapshot' in expected_attrs:
group_snapshot = objects.GroupSnapshot(context)
group_snapshot._from_db_object(context, group_snapshot,
db_snapshot['group_snapshot'])
snapshot.group_snapshot = group_snapshot
if 'metadata' in expected_attrs:
metadata = db_snapshot.get('snapshot_metadata')
if metadata is None:
@ -158,6 +166,10 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject,
if 'cluster' in updates:
raise exception.ObjectActionError(
action='create', reason=_('cluster assigned'))
if 'group_snapshot' in updates:
raise exception.ObjectActionError(
action='create',
reason=_('group_snapshot assigned'))
db_snapshot = db.snapshot_create(self._context, updates)
self._from_db_object(self._context, self, db_snapshot)
@ -171,6 +183,9 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject,
if 'cgsnapshot' in updates:
raise exception.ObjectActionError(
action='save', reason=_('cgsnapshot changed'))
if 'group_snapshot' in updates:
raise exception.ObjectActionError(
action='save', reason=_('group_snapshot changed'))
if 'cluster' in updates:
raise exception.ObjectActionError(
@ -210,6 +225,11 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject,
self.cgsnapshot = objects.CGSnapshot.get_by_id(self._context,
self.cgsnapshot_id)
if attrname == 'group_snapshot':
self.group_snapshot = objects.GroupSnapshot.get_by_id(
self._context,
self.group_snapshot_id)
self.obj_reset_changes(fields=[attrname])
def delete_metadata_key(self, context, key):
@ -284,3 +304,11 @@ class SnapshotList(base.ObjectListBase, base.CinderObject):
expected_attrs = Snapshot._get_expected_attrs(context)
return base.obj_make_list(context, cls(context), objects.Snapshot,
snapshots, expected_attrs=expected_attrs)
@classmethod
def get_all_for_group_snapshot(cls, context, group_snapshot_id):
snapshots = db.snapshot_get_all_for_group_snapshot(
context, group_snapshot_id)
expected_attrs = Snapshot._get_expected_attrs(context)
return base.obj_make_list(context, cls(context), objects.Snapshot,
snapshots, expected_attrs=expected_attrs)
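The new classmethod gives drivers and the group API a typed view of a group
snapshot's members; a short sketch:

snapshots = objects.SnapshotList.get_all_for_group_snapshot(
    ctxt, group_snapshot.id)
for snapshot in snapshots:
    # Each member carries the new group_snapshot_id field and can lazy-load
    # its parent GroupSnapshot object.
    assert snapshot.group_snapshot_id == group_snapshot.id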

@ -76,3 +76,5 @@ GROUP_TYPE2_ID = 'f8645498-1323-47a2-9442-5c57724d2e3c'
GROUP_TYPE3_ID = '1b7915f4-b899-4510-9eff-bd67508c3334'
GROUP_ID = '9a965cc6-ee3a-468d-a721-cebb193f696f'
GROUP2_ID = '40a85639-abc3-4461-9230-b131abd8ee07'
GROUP_SNAPSHOT_ID = '1e2ab152-44f0-11e6-819f-000c29d19d84'
GROUP_SNAPSHOT2_ID = '33e2ff04-44f0-11e6-819f-000c29d19d84'

@ -0,0 +1,187 @@
# Copyright 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import timeutils
import pytz
import six
from cinder import exception
from cinder import objects
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import objects as test_objects
from cinder.tests.unit.objects.test_group import fake_group
fake_group_snapshot = {
'id': fake.GROUP_SNAPSHOT_ID,
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'name': 'fake_name',
'description': 'fake_description',
'status': 'creating',
'group_id': fake.GROUP_ID,
}
class TestGroupSnapshot(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.sqlalchemy.api.group_snapshot_get',
return_value=fake_group_snapshot)
def test_get_by_id(self, group_snapshot_get):
group_snapshot = objects.GroupSnapshot.get_by_id(
self.context,
fake.GROUP_SNAPSHOT_ID)
self._compare(self, fake_group_snapshot, group_snapshot)
@mock.patch('cinder.db.group_snapshot_create',
return_value=fake_group_snapshot)
def test_create(self, group_snapshot_create):
fake_group_snap = fake_group_snapshot.copy()
del fake_group_snap['id']
group_snapshot = objects.GroupSnapshot(context=self.context,
**fake_group_snap)
group_snapshot.create()
self._compare(self, fake_group_snapshot, group_snapshot)
def test_create_with_id_except_exception(self):
group_snapshot = objects.GroupSnapshot(
context=self.context,
**{'id': fake.GROUP_ID})
self.assertRaises(exception.ObjectActionError, group_snapshot.create)
@mock.patch('cinder.db.group_snapshot_update')
def test_save(self, group_snapshot_update):
group_snapshot = objects.GroupSnapshot._from_db_object(
self.context, objects.GroupSnapshot(), fake_group_snapshot)
group_snapshot.status = 'active'
group_snapshot.save()
group_snapshot_update.assert_called_once_with(self.context,
group_snapshot.id,
{'status': 'active'})
@mock.patch('cinder.db.group_update',
return_value=fake_group)
@mock.patch('cinder.db.group_snapshot_update')
def test_save_with_group(self, group_snapshot_update,
group_snapshot_cg_update):
group = objects.Group._from_db_object(
self.context, objects.Group(), fake_group)
group_snapshot = objects.GroupSnapshot._from_db_object(
self.context, objects.GroupSnapshot(), fake_group_snapshot)
group_snapshot.name = 'foobar'
group_snapshot.group = group
self.assertEqual({'name': 'foobar',
'group': group},
group_snapshot.obj_get_changes())
self.assertRaises(exception.ObjectActionError, group_snapshot.save)
@mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow())
@mock.patch('cinder.db.sqlalchemy.api.group_snapshot_destroy')
def test_destroy(self, group_snapshot_destroy, utcnow_mock):
group_snapshot_destroy.return_value = {
'status': 'deleted',
'deleted': True,
'deleted_at': utcnow_mock.return_value}
group_snapshot = objects.GroupSnapshot(context=self.context,
id=fake.GROUP_SNAPSHOT_ID)
group_snapshot.destroy()
self.assertTrue(group_snapshot_destroy.called)
admin_context = group_snapshot_destroy.call_args[0][0]
self.assertTrue(admin_context.is_admin)
self.assertTrue(group_snapshot.deleted)
self.assertEqual('deleted', group_snapshot.status)
self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC),
group_snapshot.deleted_at)
@mock.patch('cinder.objects.group.Group.get_by_id')
@mock.patch(
'cinder.objects.snapshot.SnapshotList.get_all_for_group_snapshot')
def test_obj_load_attr(self, snapshotlist_get_for_cgs,
group_get_by_id):
group_snapshot = objects.GroupSnapshot._from_db_object(
self.context, objects.GroupSnapshot(), fake_group_snapshot)
# Test group lazy-loaded field
group = objects.Group(
context=self.context, id=fake.GROUP_ID)
group_get_by_id.return_value = group
self.assertEqual(group, group_snapshot.group)
group_get_by_id.assert_called_once_with(
self.context, group_snapshot.group_id)
# Test snapshots lazy-loaded field
snapshots_objs = [objects.Snapshot(context=self.context, id=i)
for i in [fake.SNAPSHOT_ID, fake.SNAPSHOT2_ID,
fake.SNAPSHOT3_ID]]
snapshots = objects.SnapshotList(context=self.context,
objects=snapshots_objs)
snapshotlist_get_for_cgs.return_value = snapshots
self.assertEqual(snapshots, group_snapshot.snapshots)
snapshotlist_get_for_cgs.assert_called_once_with(
self.context, group_snapshot.id)
@mock.patch('cinder.db.sqlalchemy.api.group_snapshot_get')
def test_refresh(self, group_snapshot_get):
db_group_snapshot1 = fake_group_snapshot.copy()
db_group_snapshot2 = db_group_snapshot1.copy()
db_group_snapshot2['description'] = 'foobar'
# On the second group_snapshot_get, return the GroupSnapshot with an
# updated description
group_snapshot_get.side_effect = [db_group_snapshot1,
db_group_snapshot2]
group_snapshot = objects.GroupSnapshot.get_by_id(
self.context, fake.GROUP_SNAPSHOT_ID)
self._compare(self, db_group_snapshot1, group_snapshot)
# description was updated, so a GroupSnapshot refresh should have a new
# value for that field
group_snapshot.refresh()
self._compare(self, db_group_snapshot2, group_snapshot)
if six.PY3:
call_bool = mock.call.__bool__()
else:
call_bool = mock.call.__nonzero__()
group_snapshot_get.assert_has_calls(
[mock.call(self.context,
fake.GROUP_SNAPSHOT_ID),
call_bool,
mock.call(self.context,
fake.GROUP_SNAPSHOT_ID)])
class TestGroupSnapshotList(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.group_snapshot_get_all',
return_value=[fake_group_snapshot])
def test_get_all(self, group_snapshot_get_all):
group_snapshots = objects.GroupSnapshotList.get_all(self.context)
self.assertEqual(1, len(group_snapshots))
TestGroupSnapshot._compare(self, fake_group_snapshot,
group_snapshots[0])
@mock.patch('cinder.db.group_snapshot_get_all_by_project',
return_value=[fake_group_snapshot])
def test_get_all_by_project(self, group_snapshot_get_all_by_project):
group_snapshots = objects.GroupSnapshotList.get_all_by_project(
self.context, self.project_id)
self.assertEqual(1, len(group_snapshots))
TestGroupSnapshot._compare(self, fake_group_snapshot,
group_snapshots[0])
@mock.patch('cinder.db.group_snapshot_get_all_by_group',
return_value=[fake_group_snapshot])
def test_get_all_by_group(self, group_snapshot_get_all_by_group):
group_snapshots = objects.GroupSnapshotList.get_all_by_group(
self.context, self.project_id)
self.assertEqual(1, len(group_snapshots))
TestGroupSnapshot._compare(self, fake_group_snapshot,
group_snapshots[0])

@ -37,7 +37,7 @@ object_data = {
'RequestSpec': '1.1-b0bd1a28d191d75648901fa853e8a733',
'Service': '1.4-c7d011989d1718ca0496ccf640b42712',
'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'Snapshot': '1.1-37966f7141646eb29e9ad5298ff2ca8a',
'Snapshot': '1.1-d6a9d58f627bb2a5cf804b0dd7a12bc7',
'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'Volume': '1.5-19919d8086d6a38ab9d3ab88139e70e0',
'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
@ -48,8 +48,10 @@ object_data = {
'VolumeTypeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'GroupType': '1.0-d4a7b272199d0b0d6fc3ceed58539d30',
'GroupTypeList': '1.0-1b54e51ad0fc1f3a8878f5010e7e16dc',
'Group': '1.0-fd0a002ba8c1388fe9d94ec20b346f0c',
'Group': '1.1-bd853b1d1ee05949d9ce4b33f80ac1a0',
'GroupList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'GroupSnapshot': '1.0-9af3e994e889cbeae4427c3e351fa91d',
'GroupSnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
}

@ -1003,6 +1003,47 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
execute().scalar()
self.assertEqual(1, rows)
def _check_079(self, engine, data):
"""Test adding group_snapshots tables."""
self.assertTrue(engine.dialect.has_table(engine.connect(),
"group_snapshots"))
group_snapshots = db_utils.get_table(engine, 'group_snapshots')
self.assertIsInstance(group_snapshots.c.id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.name.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.description.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.created_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_snapshots.c.updated_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_snapshots.c.deleted_at.type,
self.TIME_TYPE)
self.assertIsInstance(group_snapshots.c.deleted.type,
self.BOOL_TYPE)
self.assertIsInstance(group_snapshots.c.user_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.project_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.group_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.group_type_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(group_snapshots.c.status.type,
self.VARCHAR_TYPE)
snapshots = db_utils.get_table(engine, 'snapshots')
self.assertIsInstance(snapshots.c.group_snapshot_id.type,
self.VARCHAR_TYPE)
groups = db_utils.get_table(engine, 'groups')
self.assertIsInstance(groups.c.group_snapshot_id.type,
self.VARCHAR_TYPE)
self.assertIsInstance(groups.c.source_group_id.type,
self.VARCHAR_TYPE)
def test_walk_versions(self):
self.walk_versions(False, False)

@ -106,6 +106,10 @@ objects_ignore_messages = [
"Module 'cinder.objects' has no 'VolumeTypeList' member",
"Module 'cinder.objects' has no 'Group' member",
"Module 'cinder.objects' has no 'GroupList' member",
"Module 'cinder.objects' has no 'GroupSnapshot' member",
"Module 'cinder.objects' has no 'GroupSnapshotList' member",
"Class 'Group' has no '__table__' member",
"Class 'GroupSnapshot' has no '__table__' member",
]
objects_ignore_modules = ["cinder/objects/"]