Add SnapshotStatus enum field

This change adds a new enum and field, SnapshotStatus and
SnapshotStatusField, that will hold the constants for the 'status'
field of the Snapshot object. This enum and field are based
on the base oslo.versionedobjects enum and field. This also changes
over the Snapshot object to use the new field and bumps up the version
so newer versions know to enforce valid values. Finally, all uses of
strings for comparison and assignment to this field are changed over
to use the constants defined within the enum.

Change-Id: I968ad7a1f422811eaf437af435361dac7915b594
Partial-Implements: bp cinder-object-fields
This commit is contained in:
Kendall Nelson 2016-03-08 18:11:27 -06:00
parent e43aa0061b
commit 7043cc1cb9
44 changed files with 346 additions and 184 deletions

View File

@ -24,6 +24,7 @@ from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import rpc
from cinder import utils
from cinder import volume
@ -275,6 +276,7 @@ class SnapshotAdminController(AdminController):
"""AdminController for Snapshots."""
collection = 'snapshots'
valid_status = fields.SnapshotStatus.ALL
def _update(self, *args, **kwargs):
context = args[0]

View File

@ -19,6 +19,7 @@ from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.i18n import _, _LI
from cinder import objects
from cinder.objects import fields
LOG = logging.getLogger(__name__)
@ -53,8 +54,13 @@ class SnapshotActionsController(wsgi.Controller):
raise webob.exc.HTTPBadRequest(explanation=msg)
# Allowed state transitions
status_map = {'creating': ['creating', 'available', 'error'],
'deleting': ['deleting', 'error_deleting']}
status_map = {fields.SnapshotStatus.CREATING:
[fields.SnapshotStatus.CREATING,
fields.SnapshotStatus.AVAILABLE,
fields.SnapshotStatus.ERROR],
fields.SnapshotStatus.DELETING:
[fields.SnapshotStatus.DELETING,
fields.SnapshotStatus.ERROR_DELETING]}
current_snapshot = objects.Snapshot.get_by_id(context, id)

View File

@ -100,6 +100,7 @@ OBJ_VERSIONS.add('1.0', {'Backup': '1.3', 'BackupImport': '1.3',
OBJ_VERSIONS.add('1.1', {'Service': '1.2', 'ServiceList': '1.1'})
OBJ_VERSIONS.add('1.2', {'Backup': '1.4', 'BackupImport': '1.4'})
OBJ_VERSIONS.add('1.3', {'Service': '1.3'})
OBJ_VERSIONS.add('1.4', {'Snapshot': '1.1'})
class CinderObjectRegistry(base.VersionedObjectRegistry):

View File

@ -84,3 +84,24 @@ class ReplicationStatus(Enum):
class ReplicationStatusField(BaseEnumField):
AUTO_TYPE = ReplicationStatus()
class SnapshotStatus(Enum):
    """Enum of valid values for a Snapshot object's 'status' field."""

    ERROR = 'error'
    AVAILABLE = 'available'
    CREATING = 'creating'
    DELETING = 'deleting'
    DELETED = 'deleted'
    UPDATING = 'updating'
    ERROR_DELETING = 'error_deleting'

    # Tuple of every valid status value; passed to the base Enum as its
    # set of valid values so anything outside this set is rejected.
    ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED,
           UPDATING, ERROR_DELETING)

    def __init__(self):
        # Restrict the enum to the statuses declared above.
        super(SnapshotStatus, self).__init__(
            valid_values=SnapshotStatus.ALL)


class SnapshotStatusField(BaseEnumField):
    """Versioned-object field type backed by SnapshotStatus."""

    # The enum instance this field coerces values against.
    AUTO_TYPE = SnapshotStatus()

View File

@ -22,7 +22,7 @@ from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder.objects import fields as c_fields
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -31,7 +31,8 @@ LOG = logging.getLogger(__name__)
class Snapshot(base.CinderPersistentObject, base.CinderObject,
base.CinderObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
# Version 1.1: Changed 'status' field to use SnapshotStatusField
VERSION = '1.1'
# NOTE(thangp): OPTIONAL_FIELDS are fields that would be lazy-loaded. They
# are typically the relationship in the sqlalchemy object.
@ -45,7 +46,7 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject,
'volume_id': fields.UUIDField(nullable=True),
'cgsnapshot_id': fields.UUIDField(nullable=True),
'status': fields.StringField(nullable=True),
'status': c_fields.SnapshotStatusField(nullable=True),
'progress': fields.StringField(nullable=True),
'volume_size': fields.IntegerField(nullable=True),

View File

@ -390,7 +390,7 @@ class AdminActionsTest(BaseAdminTest):
'cgsnapshot_id': None,
'user_id': self.ctx.user_id,
'project_id': self.ctx.project_id,
'status': 'error_deleting',
'status': fields.SnapshotStatus.ERROR_DELETING,
'progress': '0%',
'volume_size': volume['size'],
'metadata': {}
@ -400,17 +400,19 @@ class AdminActionsTest(BaseAdminTest):
self.addCleanup(snapshot.destroy)
resp = self._issue_snapshot_reset(self.ctx, snapshot,
{'status': 'error'})
{'status':
fields.SnapshotStatus.ERROR})
self.assertEqual(202, resp.status_int)
snapshot = objects.Snapshot.get_by_id(self.ctx, snapshot['id'])
self.assertEqual('error', snapshot.status)
self.assertEqual(fields.SnapshotStatus.ERROR, snapshot.status)
def test_invalid_status_for_snapshot(self):
volume = db.volume_create(self.ctx,
{'status': 'available', 'host': 'test',
'provider_location': '', 'size': 1})
snapshot = objects.Snapshot(self.ctx, status='available',
snapshot = objects.Snapshot(self.ctx,
status=fields.SnapshotStatus.AVAILABLE,
volume_id=volume['id'])
snapshot.create()
self.addCleanup(snapshot.destroy)
@ -419,7 +421,7 @@ class AdminActionsTest(BaseAdminTest):
{'status': 'attaching'})
self.assertEqual(400, resp.status_int)
self.assertEqual('available', snapshot.status)
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status)
def test_force_delete(self):
# current status is creating

View File

@ -703,7 +703,8 @@ class BackupsAPITestCase(test.TestCase):
if backup_from_snapshot:
snapshot = utils.create_snapshot(self.context,
volume_id,
status='available')
status=
fields.SnapshotStatus.AVAILABLE)
snapshot_id = snapshot.id
backup_id = self._create_backup(volume_id,
status=fields.BackupStatus.AVAILABLE)

View File

@ -872,7 +872,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
self.ctxt,
volume_id,
cgsnapshot_id=cgsnapshot.id,
status='available')
status=fields.SnapshotStatus.AVAILABLE)
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
@ -948,7 +948,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
self.ctxt,
volume_id,
cgsnapshot_id=cgsnapshot_id,
status='available')
status=fields.SnapshotStatus.AVAILABLE)
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
@ -1023,7 +1023,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
self.ctxt,
volume_id,
cgsnapshot_id=cgsnapshot.id,
status='available')
status=fields.SnapshotStatus.AVAILABLE)
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,
@ -1165,7 +1165,7 @@ class ConsistencyGroupsAPITestCase(test.TestCase):
self.ctxt,
volume_id,
cgsnapshot_id=cgsnapshot.id,
status='available')
status=fields.SnapshotStatus.AVAILABLE)
test_cg_name = 'test cg'
body = {"consistencygroup-from-src": {"name": test_cg_name,

View File

@ -19,6 +19,7 @@ from oslo_serialization import jsonutils
import webob
from cinder import context
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
@ -33,7 +34,7 @@ UUID2 = fake.SNAPSHOT2_ID
def _get_default_snapshot_param():
return {'id': UUID1,
'volume_id': fake.VOLUME_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',

View File

@ -18,6 +18,7 @@ import webob
from cinder import context
from cinder import db
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
@ -26,10 +27,11 @@ from cinder.tests.unit import fake_constants as fake
def stub_snapshot_get(context, snapshot_id):
snapshot = stubs.stub_snapshot(snapshot_id)
if snapshot_id == fake.SNAPSHOT_ID:
snapshot['status'] = 'creating'
snapshot['status'] = fields.SnapshotStatus.CREATING
else:
snapshot['status'] = 'error'
snapshot['status'] = fields.SnapshotStatus.ERROR
return snapshot
@ -45,7 +47,9 @@ class SnapshotActionsTest(test.TestCase):
side_effect=stub_snapshot_get)
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
def test_update_snapshot_status(self, metadata_get, *args):
body = {'os-update_snapshot_status': {'status': 'available'}}
body = {'os-update_snapshot_status':
{'status': fields.SnapshotStatus.AVAILABLE}}
req = webob.Request.blank('/v2/%s/snapshots/%s/action' % (
fake.PROJECT_ID, fake.SNAPSHOT_ID))
req.method = "POST"

View File

@ -18,6 +18,7 @@ import webob
from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
@ -49,7 +50,7 @@ def api_snapshot_get(self, context, snp_id):
'progress': '100%',
'volume_id': fake.VOLUME_ID,
'project_id': fake.PROJECT_ID,
'status': 'available'}
'status': fields.SnapshotStatus.AVAILABLE}
if snp_id == snapshot_id:
snapshot_objct = fake_snapshot.fake_snapshot_obj(context, **snapshot)
return snapshot_objct

View File

@ -18,6 +18,7 @@ import iso8601
from cinder import exception as exc
from cinder import objects
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
@ -160,7 +161,7 @@ def stub_volume_get_all_by_project(self, context, marker, limit,
def stub_snapshot(id, **kwargs):
snapshot = {'id': id,
'volume_id': fake.SNAPSHOT_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',

View File

@ -21,6 +21,7 @@ from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v1 import stubs
@ -37,7 +38,7 @@ INVALID_UUID = fake.WILL_NOT_BE_FOUND_ID
def _get_default_snapshot_param():
return {'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
@ -154,7 +155,7 @@ class SnapshotApiTest(test.TestCase):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
@ -173,7 +174,7 @@ class SnapshotApiTest(test.TestCase):
expected = {'snapshot': {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': u'available',
'status': fields.SnapshotStatus.AVAILABLE,
'size': 100,
'created_at': None,
'display_name': u'Updated Test Name',
@ -214,7 +215,7 @@ class SnapshotApiTest(test.TestCase):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
@ -248,7 +249,7 @@ class SnapshotApiTest(test.TestCase):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
@ -283,7 +284,7 @@ class SnapshotApiTest(test.TestCase):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',

View File

@ -18,6 +18,7 @@ import iso8601
from cinder import exception as exc
from cinder import objects
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
@ -190,7 +191,7 @@ def stub_volume_api_get_all_by_project(self, context, marker, limit,
def stub_snapshot(id, **kwargs):
snapshot = {'id': id,
'volume_id': fake.VOLUME_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',

View File

@ -24,6 +24,7 @@ from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import stubs
@ -44,7 +45,7 @@ def _get_default_snapshot_param():
return {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'created_at': None,
'updated_at': None,
@ -175,7 +176,7 @@ class SnapshotApiTest(test.TestCase):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
@ -197,7 +198,7 @@ class SnapshotApiTest(test.TestCase):
'snapshot': {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': u'available',
'status': fields.SnapshotStatus.AVAILABLE,
'size': 100,
'created_at': None,
'updated_at': None,
@ -242,7 +243,7 @@ class SnapshotApiTest(test.TestCase):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
@ -274,7 +275,7 @@ class SnapshotApiTest(test.TestCase):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',
@ -307,7 +308,7 @@ class SnapshotApiTest(test.TestCase):
snapshot = {
'id': UUID,
'volume_id': fake.VOLUME_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 100,
'display_name': 'Default name',
'display_description': 'Default description',

View File

@ -14,6 +14,7 @@
from oslo_versionedobjects import fields
from cinder.objects import fields as c_fields
from cinder.objects import snapshot
from cinder.tests.unit import fake_constants as fake
@ -22,7 +23,7 @@ def fake_db_snapshot(**updates):
db_snapshot = {
'id': fake.SNAPSHOT_ID,
'volume_id': fake.VOLUME_ID,
'status': "creating",
'status': c_fields.SnapshotStatus.CREATING,
'progress': '0%',
'volume_size': 1,
'display_name': 'fake_name',

View File

@ -105,3 +105,25 @@ class TestConsistencyGroupStatus(TestField):
def test_stringify_invalid(self):
self.assertRaises(ValueError, self.field.stringify, 'not_a_status')
class TestSnapshotStatus(TestField):
    """Tests for the SnapshotStatusField enum field."""

    def setUp(self):
        super(TestSnapshotStatus, self).setUp()
        self.field = fields.SnapshotStatusField()
        # (input, expected) pairs: each declared status coerces to itself.
        self.coerce_good_values = [('error', 'error'),
                                   ('available', 'available'),
                                   ('creating', 'creating'),
                                   ('deleting', 'deleting'),
                                   ('deleted', 'deleted'),
                                   ('updating', 'updating'),
                                   ('error_deleting', 'error_deleting')]
        # A value outside the enum must be rejected on coercion.
        self.coerce_bad_values = ['acme']
        self.to_primitive_values = self.coerce_good_values[0:1]
        self.from_primitive_values = self.coerce_good_values[0:1]

    def test_stringify(self):
        # A valid status stringifies to its quoted form.
        self.assertEqual("'error'", self.field.stringify('error'))

    def test_stringify_invalid(self):
        # Stringifying a value outside the enum raises ValueError.
        self.assertRaises(ValueError, self.field.stringify, 'not_a_status')

View File

@ -31,7 +31,7 @@ object_data = {
'ConsistencyGroupList': '1.1-73916823b697dfa0c7f02508d87e0f28',
'Service': '1.3-66c8e1683f58546c54551e9ff0a3b111',
'ServiceList': '1.1-cb758b200f0a3a90efabfc5aa2ffb627',
'Snapshot': '1.0-8bd748dde255ed977a1fa11888f13500',
'Snapshot': '1.1-ac41f2fe2fb0e34127155d1ec6e4c7e0',
'SnapshotList': '1.0-71661e7180ef6cc51501704a9bea4bf1',
'Volume': '1.3-049e3e5dc411b1a4deb7d6ee4f1ad5ef',
'VolumeAttachment': '1.0-8fc9a9ac6f554fdf2a194d25dbf28a3b',

View File

@ -21,6 +21,7 @@ from oslo_log import log as logging
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
@ -40,7 +41,7 @@ del fake_db_snapshot['volume']
fake_snapshot_obj = {
'id': fake.SNAPSHOT_ID,
'volume_id': fake.VOLUME_ID,
'status': "creating",
'status': fields.SnapshotStatus.CREATING,
'progress': '0%',
'volume_size': 1,
'display_name': 'fake_name',

View File

@ -131,7 +131,7 @@ class BaseBackupTest(test.TestCase):
def _create_snapshot_db_entry(self, display_name='test_snapshot',
display_description='test snapshot',
status='available',
status=fields.SnapshotStatus.AVAILABLE,
size=1,
volume_id=str(uuid.uuid4()),
provider_location=None):
@ -228,7 +228,7 @@ class BackupTestCase(BaseBackupTest):
vol5_id = self._create_volume_db_entry()
db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'})
temp_snap = self._create_snapshot_db_entry()
temp_snap.status = 'available'
temp_snap.status = fields.SnapshotStatus.AVAILABLE
temp_snap.save()
backup1 = self._create_backup_db_entry(

View File

@ -19,6 +19,7 @@ from oslo_config import cfg
from cinder import context
from cinder import db
import cinder.exception
from cinder.objects import fields
from cinder.objects import snapshot as obj_snap
from cinder.objects import volume as obj_volume
import cinder.test
@ -402,7 +403,7 @@ class TestBlockDeviceDriver(cinder.test.TestCase):
def test_delete_snapshot(self, _clear_volume, _exists):
TEST_SNAP = obj_snap.Snapshot(volume_id=fake.VOLUME_ID,
provider_location='/dev/loop1',
status='available')
status=fields.SnapshotStatus.AVAILABLE)
with mock.patch.object(self.drv, 'local_path',
return_value='/dev/loop1') as lp_mocked:

View File

@ -27,6 +27,7 @@ from cinder import db
from cinder.db.sqlalchemy import api as sqlalchemy_api
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import test
from cinder.tests.unit import fake_constants as fake
@ -1238,15 +1239,21 @@ class DBAPISnapshotTestCase(BaseTest):
def test_snapshot_get_all_by_filter(self):
db.volume_create(self.ctxt, {'id': 1})
db.volume_create(self.ctxt, {'id': 2})
snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1,
snapshot1 = db.snapshot_create(self.ctxt,
{'id': 1, 'volume_id': 1,
'display_name': 'one',
'status': 'available'})
snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 1,
'status':
fields.SnapshotStatus.AVAILABLE})
snapshot2 = db.snapshot_create(self.ctxt,
{'id': 2, 'volume_id': 1,
'display_name': 'two',
'status': 'creating'})
snapshot3 = db.snapshot_create(self.ctxt, {'id': 3, 'volume_id': 2,
'status':
fields.SnapshotStatus.CREATING})
snapshot3 = db.snapshot_create(self.ctxt,
{'id': 3, 'volume_id': 2,
'display_name': 'three',
'status': 'available'})
'status':
fields.SnapshotStatus.AVAILABLE})
# no filter
filters = {}
snapshots = db.snapshot_get_all(self.ctxt, filters=filters)
@ -1271,7 +1278,7 @@ class DBAPISnapshotTestCase(BaseTest):
self.ctxt,
filters),
ignored_keys=['metadata', 'volume'])
filters = {'status': 'error'}
filters = {'status': fields.SnapshotStatus.ERROR}
self._assertEqualListsOfObjects([],
db.snapshot_get_all(
self.ctxt,
@ -1284,13 +1291,13 @@ class DBAPISnapshotTestCase(BaseTest):
self.ctxt,
filters),
ignored_keys=['metadata', 'volume'])
filters = {'status': 'available'}
filters = {'status': fields.SnapshotStatus.AVAILABLE}
self._assertEqualListsOfObjects([snapshot1, snapshot3],
db.snapshot_get_all(
self.ctxt,
filters),
ignored_keys=['metadata', 'volume'])
filters = {'volume_id': 1, 'status': 'available'}
filters = {'volume_id': 1, 'status': fields.SnapshotStatus.AVAILABLE}
self._assertEqualListsOfObjects([snapshot1],
db.snapshot_get_all(
self.ctxt,
@ -1307,8 +1314,11 @@ class DBAPISnapshotTestCase(BaseTest):
db.volume_create(self.ctxt, {'id': 1, 'host': 'host1'})
db.volume_create(self.ctxt, {'id': 2, 'host': 'host2'})
snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1})
snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 2,
'status': 'error'})
snapshot2 = db.snapshot_create(self.ctxt,
{'id': 2,
'volume_id': 2,
'status':
fields.SnapshotStatus.ERROR})
self._assertEqualListsOfObjects([snapshot1],
db.snapshot_get_by_host(
@ -1320,15 +1330,13 @@ class DBAPISnapshotTestCase(BaseTest):
self.ctxt,
'host2'),
ignored_keys='volume')
self._assertEqualListsOfObjects([],
db.snapshot_get_by_host(
self.ctxt,
'host2', {'status': 'available'}),
self._assertEqualListsOfObjects(
[], db.snapshot_get_by_host(self.ctxt, 'host2', {
'status': fields.SnapshotStatus.AVAILABLE}),
ignored_keys='volume')
self._assertEqualListsOfObjects([snapshot2],
db.snapshot_get_by_host(
self.ctxt,
'host2', {'status': 'error'}),
self._assertEqualListsOfObjects(
[snapshot2], db.snapshot_get_by_host(self.ctxt, 'host2', {
'status': fields.SnapshotStatus.ERROR}),
ignored_keys='volume')
self._assertEqualListsOfObjects([],
db.snapshot_get_by_host(
@ -1368,9 +1376,9 @@ class DBAPISnapshotTestCase(BaseTest):
db.volume_create(self.ctxt, {'id': 2})
snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1,
'project_id': 'project1'})
snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 2,
'status': 'error',
'project_id': 'project2'})
snapshot2 = db.snapshot_create(
self.ctxt, {'id': 2, 'volume_id': 2, 'status':
fields.SnapshotStatus.ERROR, 'project_id': 'project2'})
self._assertEqualListsOfObjects([snapshot1],
db.snapshot_get_all_by_project(
@ -1382,17 +1390,16 @@ class DBAPISnapshotTestCase(BaseTest):
self.ctxt,
'project2'),
ignored_keys='volume')
self._assertEqualListsOfObjects([],
db.snapshot_get_all_by_project(
self._assertEqualListsOfObjects(
[], db.snapshot_get_all_by_project(
self.ctxt,
'project2',
{'status': 'available'}),
{'status': fields.SnapshotStatus.AVAILABLE}),
ignored_keys='volume')
self._assertEqualListsOfObjects([snapshot2],
db.snapshot_get_all_by_project(
self.ctxt,
'project2',
{'status': 'error'}),
self._assertEqualListsOfObjects(
[snapshot2], db.snapshot_get_all_by_project(
self.ctxt, 'project2', {
'status': fields.SnapshotStatus.ERROR}),
ignored_keys='volume')
self._assertEqualListsOfObjects([],
db.snapshot_get_all_by_project(

View File

@ -34,6 +34,7 @@ from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder.objects import fields
from cinder import test
from cinder import utils
from cinder.volume import driver as base_driver
@ -987,7 +988,7 @@ class GlusterFsDriverTestCase(test.TestCase):
'volume': volume,
'id': self.SNAP_UUID,
'context': ctxt,
'status': 'asdf',
'status': fields.SnapshotStatus.CREATING,
'progress': 'asdf'}
snap_path = '%s.%s' % (volume_path, self.SNAP_UUID)
@ -1003,7 +1004,7 @@ class GlusterFsDriverTestCase(test.TestCase):
'new_file': snap_file}
snap_ref_progress = snap_ref.copy()
snap_ref_progress['status'] = 'creating'
snap_ref_progress['status'] = fields.SnapshotStatus.CREATING
snap_ref_progress_0p = snap_ref_progress.copy()
snap_ref_progress_0p['progress'] = '0%'
@ -1054,7 +1055,7 @@ class GlusterFsDriverTestCase(test.TestCase):
mock.patch.object(drv, '_nova') as mock_nova,\
mock.patch.object(time, 'sleep') as mock_sleep:
snap_ref_progress = snap_ref.copy()
snap_ref_progress['status'] = 'creating'
snap_ref_progress['status'] = fields.SnapshotStatus.CREATING
snap_ref_progress_0p = snap_ref_progress.copy()
snap_ref_progress_0p['progress'] = '0%'
@ -1064,7 +1065,7 @@ class GlusterFsDriverTestCase(test.TestCase):
snap_ref_progress_99p = snap_ref_progress.copy()
snap_ref_progress_99p['progress'] = '99%'
snap_ref_progress_99p['status'] = 'error'
snap_ref_progress_99p['status'] = fields.SnapshotStatus.ERROR
mock_snapshot_get.side_effect = [
snap_ref_progress_0p, snap_ref_progress_50p,

View File

@ -1726,7 +1726,7 @@ class GPFSDriverTestCase(test.TestCase):
[])
self.driver.create_snapshot.assert_called_once_with(snapshot1)
self.assertEqual({'status': cgsnap['status']}, model_update)
self.assertEqual('available', snapshot1['status'])
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot1['status'])
self.driver.db.snapshot_get_all_for_cgsnapshot.\
assert_called_once_with(ctxt, cgsnap['id'])
@ -1758,7 +1758,7 @@ class GPFSDriverTestCase(test.TestCase):
[])
self.driver.delete_snapshot.assert_called_once_with(snapshot1)
self.assertEqual({'status': cgsnap['status']}, model_update)
self.assertEqual('deleted', snapshot1['status'])
self.assertEqual(fields.SnapshotStatus.DELETED, snapshot1['status'])
self.driver.db.snapshot_get_all_for_cgsnapshot.\
assert_called_once_with(ctxt, cgsnap['id'])

View File

@ -231,7 +231,7 @@ class HPE3PARBaseDriver(object):
'project_id': PROJECT_ID,
'volume_id': VOLUME_ID_SNAP,
'volume_name': VOLUME_NAME,
'status': 'creating',
'status': fields.SnapshotStatus.CREATING,
'progress': '0%',
'volume_size': 2,
'display_name': 'fakesnap',

View File

@ -96,7 +96,7 @@ class QuotaIntegrationTestCase(test.TestCase):
snapshot.volume_id = volume['id']
snapshot.volume_size = volume['size']
snapshot.host = volume['host']
snapshot.status = 'available'
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.create()
return snapshot

View File

@ -3001,7 +3001,7 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase):
cg_snapshot.id,
cg_snapshot.name,
cg_snapshot.id,
"creating"))
fields.SnapshotStatus.CREATING))
return cg_snapshot, snapshots
@ -3015,8 +3015,8 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase):
"CGSnapshot created failed")
for snapshot in snapshots_model:
self.assertEqual('available', snapshot['status'])
self.assertEqual(fields.SnapshotStatus.AVAILABLE,
snapshot['status'])
return cg_snapshot, snapshots
def _create_test_vol(self, opts):
@ -4261,7 +4261,8 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase):
"CGSnapshot created failed")
for snapshot in model_update[1]:
self.assertEqual('available', snapshot['status'])
self.assertEqual(fields.SnapshotStatus.AVAILABLE,
snapshot['status'])
model_update = self.driver.delete_consistencygroup(self.ctxt,
cg, volumes)

View File

@ -50,6 +50,7 @@ from cinder import keymgr
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import fields
import cinder.policy
from cinder import quota
from cinder import test
@ -578,15 +579,16 @@ class VolumeTestCase(BaseVolumeTestCase):
"""
volume = tests_utils.create_volume(self.context,
**self.volume_params)
snapshot = tests_utils.create_snapshot(self.context,
snapshot = tests_utils.create_snapshot(
self.context,
volume['id'],
status='creating')
status=fields.SnapshotStatus.CREATING)
snap_id = snapshot['id']
self.volume.init_host()
snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id)
self.assertEqual('error', snapshot_obj.status)
self.assertEqual(fields.SnapshotStatus.ERROR, snapshot_obj.status)
self.volume.delete_snapshot(self.context, snapshot_obj)
self.volume.delete_volume(self.context, volume.id, volume=volume)
@ -1316,7 +1318,7 @@ class VolumeTestCase(BaseVolumeTestCase):
'biz')
snapshot = {'id': fake.SNAPSHOT_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 10,
'volume_type_id': biz_type['id']}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
@ -1499,7 +1501,7 @@ class VolumeTestCase(BaseVolumeTestCase):
'volume_type_id': biz_type['id']}
snapshot = {'id': fake.SNAPSHOT_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 10,
'volume_type_id': biz_type['id']}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
@ -1541,7 +1543,7 @@ class VolumeTestCase(BaseVolumeTestCase):
self.context, volume_src['id'], snapshot_obj)
# NOTE(flaper87): The volume status should be error.
self.assertEqual("error", snapshot_obj.status)
self.assertEqual(fields.SnapshotStatus.ERROR, snapshot_obj.status)
# lets cleanup the mess
self.volume.driver._initialized = True
@ -1821,7 +1823,7 @@ class VolumeTestCase(BaseVolumeTestCase):
self.volume.create_snapshot(self.context, src_vol['id'], snapshot_obj)
# ensure that status of snapshot is 'available'
self.assertEqual('available', snapshot_obj.status)
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_obj.status)
dst_vol = tests_utils.create_volume(self.context,
snapshot_id=snapshot_id,
@ -1880,7 +1882,7 @@ class VolumeTestCase(BaseVolumeTestCase):
self.volume.create_snapshot(self.context, volume['id'], snapshot_obj)
# ensure that status of snapshot is 'available'
self.assertEqual('available', snapshot_obj.status)
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_obj.status)
# create volume from snapshot
dst_vol = tests_utils.create_volume(self.context,
@ -2021,7 +2023,8 @@ class VolumeTestCase(BaseVolumeTestCase):
volume_src,
'name',
'description')
snapshot_ref['status'] = 'available' # status must be available
snapshot_ref['status'] = fields.SnapshotStatus.AVAILABLE
# status must be available
volume_dst = volume_api.create(self.context,
1,
'name',
@ -2109,8 +2112,9 @@ class VolumeTestCase(BaseVolumeTestCase):
def test_create_volume_from_snapshot_fail_bad_size(self):
"""Test volume can't be created from snapshot with bad volume size."""
volume_api = cinder.volume.api.API()
snapshot = {'id': fake.SNAPSHOT_ID,
'status': 'available',
'status': fields.SnapshotStatus.AVAILABLE,
'volume_size': 10}
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context,
**snapshot)
@ -3102,7 +3106,7 @@ class VolumeTestCase(BaseVolumeTestCase):
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[3]
self.assertEqual('snapshot.create.end', msg['event_type'])
expected['status'] = 'available'
expected['status'] = fields.SnapshotStatus.AVAILABLE
self.assertDictMatch(expected, msg['payload'])
if len(self.notifier.notifications) > 4:
@ -3117,7 +3121,7 @@ class VolumeTestCase(BaseVolumeTestCase):
self.volume.delete_snapshot(self.context, snapshot)
msg = self.notifier.notifications[4]
self.assertEqual('snapshot.delete.start', msg['event_type'])
expected['status'] = 'available'
expected['status'] = fields.SnapshotStatus.AVAILABLE
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[5]
self.assertEqual('snapshot.delete.end', msg['event_type'])
@ -3134,7 +3138,7 @@ class VolumeTestCase(BaseVolumeTestCase):
snap = objects.Snapshot.get_by_id(context.get_admin_context(
read_deleted='yes'), snapshot_id)
self.assertEqual('deleted', snap.status)
self.assertEqual(fields.SnapshotStatus.DELETED, snap.status)
self.assertRaises(exception.NotFound,
db.snapshot_get,
self.context,
@ -3297,19 +3301,32 @@ class VolumeTestCase(BaseVolumeTestCase):
def test_can_delete_errored_snapshot(self):
"""Test snapshot can be created and deleted."""
volume = tests_utils.create_volume(self.context, CONF.host)
snapshot = create_snapshot(volume.id, size=volume['size'],
ctxt=self.context, status='bad')
ctxt=self.context,
status=fields.SnapshotStatus.ERROR)
self.volume_api.delete_snapshot(self.context, snapshot)
self.assertEqual(fields.SnapshotStatus.DELETING, snapshot.status)
self.volume.delete_volume(self.context, volume.id)
def test_cannot_delete_snapshot_with_bad_status(self):
volume = tests_utils.create_volume(self.context, CONF.host)
snapshot = create_snapshot(volume.id, size=volume['size'],
ctxt=self.context,
status=fields.SnapshotStatus.CREATING)
self.assertRaises(exception.InvalidSnapshot,
self.volume_api.delete_snapshot,
self.context,
snapshot)
snapshot.status = 'error'
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
self.volume_api.delete_snapshot(self.context, snapshot)
self.assertEqual('deleting', snapshot.status)
self.volume.delete_volume(self.context, volume.id, volume=volume)
self.assertEqual(fields.SnapshotStatus.DELETING, snapshot.status)
self.volume.delete_volume(self.context, volume.id)
def test_create_snapshot_force(self):
"""Test snapshot in use can be created forcibly."""
@ -3391,7 +3408,7 @@ class VolumeTestCase(BaseVolumeTestCase):
self.assertDictMatch(vol_glance_dict, snap_glance_dict)
# ensure that snapshot's status is changed to 'available'
self.assertEqual('available', snap.status)
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snap.status)
# cleanup resource
snap.destroy()
@ -3436,7 +3453,7 @@ class VolumeTestCase(BaseVolumeTestCase):
ctxt, snap.id)
# ensure that status of snapshot is 'error'
self.assertEqual('error', snap.status)
self.assertEqual(fields.SnapshotStatus.ERROR, snap.status)
# cleanup resource
snap.destroy()
@ -3458,7 +3475,7 @@ class VolumeTestCase(BaseVolumeTestCase):
self.context, snapshot.id)
# ensure that status of snapshot is 'available'
self.assertEqual('available', snapshot.status)
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status)
# cleanup resource
snapshot.destroy()
@ -3488,7 +3505,7 @@ class VolumeTestCase(BaseVolumeTestCase):
self.volume.delete_snapshot(self.context, snapshot)
snapshot_ref = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.assertEqual(snapshot_id, snapshot_ref.id)
self.assertEqual("available", snapshot_ref.status)
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_ref.status)
@test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX")
def test_delete_no_dev_fails(self):
@ -3515,7 +3532,7 @@ class VolumeTestCase(BaseVolumeTestCase):
self.volume.delete_snapshot(self.context, snapshot)
snapshot_ref = objects.Snapshot.get_by_id(self.context, snapshot_id)
self.assertEqual(snapshot_id, snapshot_ref.id)
self.assertEqual("available", snapshot_ref.status)
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_ref.status)
self.mox.UnsetStubs()
self.assertRaises(exception.VolumeBackendAPIException,
@ -4384,7 +4401,7 @@ class VolumeTestCase(BaseVolumeTestCase):
snapshot.id, objects.Snapshot.get_by_id(self.context,
snapshot.id).id)
snapshot.update({'status': 'in-use'})
snapshot.update({'status': fields.SnapshotStatus.CREATING})
snapshot.save()
volume['status'] = 'available'
@ -5700,7 +5717,7 @@ class GetActiveByWindowTestCase(BaseVolumeTestCase):
def setUp(self):
super(GetActiveByWindowTestCase, self).setUp()
self.ctx = context.get_admin_context(read_deleted="yes")
self.db_attrs = [
self.db_vol_attrs = [
{
'id': fake.VOLUME_ID,
'host': 'devstack',
@ -5740,23 +5757,71 @@ class GetActiveByWindowTestCase(BaseVolumeTestCase):
}
]
self.db_snap_attrs = [
{
'id': fake.SNAPSHOT_ID,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True,
'status': fields.SnapshotStatus.DELETED,
'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1),
'volume_id': fake.VOLUME_ID,
},
{
'id': fake.SNAPSHOT2_ID,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True,
'status': fields.SnapshotStatus.DELETED,
'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1),
'volume_id': fake.VOLUME_ID,
},
{
'id': fake.SNAPSHOT3_ID,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True,
'status': fields.SnapshotStatus.DELETED,
'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1),
'volume_id': fake.VOLUME_ID,
},
{
'id': fake.SNAPSHOT_ID,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 3, 10, 1, 1, 1),
'volume_id': fake.VOLUME_ID,
},
{
'id': fake.SNAPSHOT2_ID,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 5, 1, 1, 1, 1),
'volume_id': fake.VOLUME_ID
}
]
def test_volume_get_active_by_window(self):
# Find all all volumes valid within a timeframe window.
# Not in window
db.volume_create(self.ctx, self.db_attrs[0])
db.volume_create(self.ctx, self.db_vol_attrs[0])
# In - deleted in window
db.volume_create(self.ctx, self.db_attrs[1])
db.volume_create(self.ctx, self.db_vol_attrs[1])
# In - deleted after window
db.volume_create(self.ctx, self.db_attrs[2])
db.volume_create(self.ctx, self.db_vol_attrs[2])
# In - created in window
db.volume_create(self.context, self.db_attrs[3])
db.volume_create(self.context, self.db_vol_attrs[3])
# Not of window.
db.volume_create(self.context, self.db_attrs[4])
db.volume_create(self.context, self.db_vol_attrs[4])
volumes = db.volume_get_active_by_window(
self.context,
@ -5772,31 +5837,31 @@ class GetActiveByWindowTestCase(BaseVolumeTestCase):
# Find all all snapshots valid within a timeframe window.
db.volume_create(self.context, {'id': fake.VOLUME_ID})
for i in range(5):
self.db_attrs[i]['volume_id'] = fake.VOLUME_ID
self.db_vol_attrs[i]['volume_id'] = fake.VOLUME_ID
# Not in window
del self.db_attrs[0]['id']
snap1 = objects.Snapshot(self.ctx, **self.db_attrs[0])
del self.db_snap_attrs[0]['id']
snap1 = objects.Snapshot(self.ctx, **self.db_snap_attrs[0])
snap1.create()
# In - deleted in window
del self.db_attrs[1]['id']
snap2 = objects.Snapshot(self.ctx, **self.db_attrs[1])
del self.db_snap_attrs[1]['id']
snap2 = objects.Snapshot(self.ctx, **self.db_snap_attrs[1])
snap2.create()
# In - deleted after window
del self.db_attrs[2]['id']
snap3 = objects.Snapshot(self.ctx, **self.db_attrs[2])
del self.db_snap_attrs[2]['id']
snap3 = objects.Snapshot(self.ctx, **self.db_snap_attrs[2])
snap3.create()
# In - created in window
del self.db_attrs[3]['id']
snap4 = objects.Snapshot(self.ctx, **self.db_attrs[3])
del self.db_snap_attrs[3]['id']
snap4 = objects.Snapshot(self.ctx, **self.db_snap_attrs[3])
snap4.create()
# Not of window.
del self.db_attrs[4]['id']
snap5 = objects.Snapshot(self.ctx, **self.db_attrs[4])
del self.db_snap_attrs[4]['id']
snap5 = objects.Snapshot(self.ctx, **self.db_snap_attrs[4])
snap5.create()
snapshots = objects.SnapshotList.get_active_by_window(

View File

@ -23,6 +23,7 @@ from oslo_serialization import jsonutils
from cinder import context
from cinder import db
from cinder import objects
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_backup
from cinder.tests.unit import fake_constants as fake
@ -51,7 +52,7 @@ class VolumeRpcAPITestCase(test.TestCase):
volume = db.volume_create(self.context, vol)
kwargs = {
'status': "creating",
'status': fields.SnapshotStatus.CREATING,
'progress': '0%',
'display_name': 'fake_name',
'display_description': 'fake_description'}
@ -419,7 +420,7 @@ class VolumeRpcAPITestCase(test.TestCase):
snpshot = {
'id': fake.SNAPSHOT_ID,
'volume_id': fake.VOLUME_ID,
'status': "creating",
'status': fields.SnapshotStatus.CREATING,
'progress': '0%',
'volume_size': 0,
'display_name': 'fake_name',

View File

@ -26,6 +26,7 @@ from oslo_config import cfg
from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_backup
from cinder.tests.unit import fake_constants as fake
@ -134,7 +135,7 @@ class NotifyUsageTestCase(test.TestCase):
'id': fake.SNAPSHOT_ID,
'display_name': '11',
'created_at': '2014-12-11T10:10:00',
'status': 'pause',
'status': fields.SnapshotStatus.ERROR,
'deleted': '',
'snapshot_metadata': [{'key': 'fake_snap_meta_key',
'value': 'fake_snap_meta_value'}],
@ -152,7 +153,7 @@ class NotifyUsageTestCase(test.TestCase):
'snapshot_id': fake.SNAPSHOT_ID,
'display_name': '11',
'created_at': 'DONTCARE',
'status': 'pause',
'status': fields.SnapshotStatus.ERROR,
'deleted': '',
'metadata': six.text_type({'fake_snap_meta_key':
u'fake_snap_meta_value'}),

View File

@ -99,7 +99,7 @@ def create_snapshot(ctxt,
display_name='test_snapshot',
display_description='this is a test snapshot',
cgsnapshot_id = None,
status='creating',
status=fields.SnapshotStatus.CREATING,
**kwargs):
vol = db.volume_get(ctxt, volume_id)
snap = objects.Snapshot(ctxt)

View File

@ -790,7 +790,7 @@ class API(base.Base):
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'status': fields.SnapshotStatus.CREATING,
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
@ -909,7 +909,7 @@ class API(base.Base):
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'status': fields.SnapshotStatus.CREATING,
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
@ -943,9 +943,11 @@ class API(base.Base):
expected = {'cgsnapshot_id': None}
# If not force deleting we have status conditions
if not force:
expected['status'] = ('available', 'error')
expected['status'] = (fields.SnapshotStatus.AVAILABLE,
fields.SnapshotStatus.ERROR)
result = snapshot.conditional_update({'status': 'deleting'}, expected)
result = snapshot.conditional_update(
{'status': fields.SnapshotStatus.DELETING}, expected)
if not result:
status = utils.build_or_str(expected.get('status'),
_('status must be %s and'))

View File

@ -29,6 +29,7 @@ from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.image import image_utils
from cinder import objects
from cinder.objects import fields
from cinder import utils
from cinder.volume import driver_utils
from cinder.volume import rpcapi as volume_rpcapi
@ -1326,7 +1327,7 @@ class BaseVD(object):
'cgsnapshot_id': None,
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'status': fields.SnapshotStatus.CREATING,
'progress': '0%',
'volume_size': volume['size'],
'display_name': 'backup-snap-%s' % volume['id'],
@ -1346,7 +1347,7 @@ class BaseVD(object):
context, temp_snap_ref.id)
temp_snap_ref.destroy()
temp_snap_ref.status = 'available'
temp_snap_ref.status = fields.SnapshotStatus.AVAILABLE
temp_snap_ref.save()
return temp_snap_ref

View File

@ -18,6 +18,7 @@ from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.objects import fields
from cinder.volume import driver
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.san.san import san_opts
@ -755,7 +756,7 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
for snapshot in snapshots:
snapshot_updates.append({
'id': snapshot.id,
'status': 'available'
'status': fields.SnapshotStatus.AVAILABLE
})
model_update = {'status': 'available'}
@ -796,7 +797,7 @@ class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
raise exception.VolumeBackendAPIException(data=msg)
for snapshot in snapshots:
snapshot.status = 'deleted'
snapshot.status = fields.SnapshotStatus.DELETED
model_update = {'status': 'deleted'}

View File

@ -2677,7 +2677,8 @@ class EMCVMAXCommon(object):
for snapshot in snapshots:
snapshots_model_update.append(
{'id': snapshot['id'], 'status': 'available'})
{'id': snapshot['id'],
'status': fields.SnapshotStatus.AVAILABLE})
modelUpdate = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
return modelUpdate, snapshots_model_update
@ -2716,7 +2717,8 @@ class EMCVMAXCommon(object):
snapshots, extraSpecs)
for snapshot in snapshots:
snapshots_model_update.append(
{'id': snapshot['id'], 'status': 'deleted'})
{'id': snapshot['id'],
'status': fields.SnapshotStatus.DELETED})
except Exception:
exceptionMessage = (_("Failed to delete snapshot for cg: "
"%(cgId)s.")

View File

@ -3285,7 +3285,8 @@ class EMCVnxCliBase(object):
cgsnapshot['id'])
for snapshot in snapshots:
snapshots_model_update.append(
{'id': snapshot['id'], 'status': 'available'})
{'id': snapshot['id'],
'status': fields.SnapshotStatus.AVAILABLE})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Create cg snapshot %s failed.'),
@ -3310,7 +3311,8 @@ class EMCVnxCliBase(object):
self._client.delete_cgsnapshot(cgsnapshot['id'])
for snapshot in snapshots:
snapshots_model_update.append(
{'id': snapshot['id'], 'status': 'deleted'})
{'id': snapshot['id'],
'status': fields.SnapshotStatus.DELETED})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Delete cgsnapshot %s failed.'),

View File

@ -735,7 +735,7 @@ class XtremIOVolumeDriver(san.SanDriver):
context, cgsnapshot['id'])
for snapshot in snapshots:
snapshot.status = 'available'
snapshot.status = fields.SnapshotStatus.AVAILABLE
model_update = {'status': 'available'}
@ -750,7 +750,7 @@ class XtremIOVolumeDriver(san.SanDriver):
context, cgsnapshot['id'])
for snapshot in snapshots:
snapshot.status = 'deleted'
snapshot.status = fields.SnapshotStatus.DELETED
model_update = {'status': cgsnapshot.status}

View File

@ -662,7 +662,7 @@ class HPE3PARCommon(object):
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_update = {'id': snapshot['id'],
'status': 'available'}
'status': fields.SnapshotStatus.AVAILABLE}
snapshot_model_updates.append(snapshot_update)
model_update = {'status': 'available'}
@ -680,20 +680,20 @@ class HPE3PARCommon(object):
try:
snap_name = cgsnap_name + "-" + six.text_type(i)
self.client.deleteVolume(snap_name)
snapshot_update['status'] = 'deleted'
snapshot_update['status'] = fields.SnapshotStatus.DELETED
except hpeexceptions.HTTPNotFound as ex:
# We'll let this act as if it worked
# it helps clean up the cinder entries.
LOG.warning(_LW("Delete Snapshot id not found. Removing from "
"cinder: %(id)s Ex: %(msg)s"),
{'id': snapshot['id'], 'msg': ex})
snapshot_update['status'] = 'error'
snapshot_update['status'] = fields.SnapshotStatus.ERROR
except Exception as ex:
LOG.error(_LE("There was an error deleting snapshot %(id)s: "
"%(error)."),
{'id': snapshot['id'],
'error': six.text_type(ex)})
snapshot_update['status'] = 'error'
snapshot_update['status'] = fields.SnapshotStatus.ERROR
snapshot_model_updates.append(snapshot_update)
model_update = {'status': cgsnapshot.status}

View File

@ -522,7 +522,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver):
'snapshotName': snapshot_name}
snap_set.append(snap_set_member)
snapshot_update = {'id': snapshot['id'],
'status': 'available'}
'status': fields.SnapshotStatus.AVAILABLE}
snapshot_model_updates.append(snapshot_update)
source_volume_id = snap_set[0]['volumeId']
@ -562,20 +562,20 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver):
snap_name = snap_name_base + "-" + six.text_type(i)
snap_info = client.getSnapshotByName(snap_name)
client.deleteSnapshot(snap_info['id'])
snapshot_update['status'] = 'deleted'
snapshot_update['status'] = fields.SnapshotStatus.DELETED
except hpeexceptions.HTTPServerError as ex:
in_use_msg = ('cannot be deleted because it is a clone '
'point')
if in_use_msg in ex.get_description():
LOG.error(_LE("The snapshot cannot be deleted because "
"it is a clone point."))
snapshot_update['status'] = 'error'
snapshot_update['status'] = fields.SnapshotStatus.ERROR
except Exception as ex:
LOG.error(_LE("There was an error deleting snapshot %(id)s: "
"%(error)."),
{'id': snapshot['id'],
'error': six.text_type(ex)})
snapshot_update['status'] = 'error'
snapshot_update['status'] = fields.SnapshotStatus.ERROR
snapshot_model_updates.append(snapshot_update)
self._logout(client)

View File

@ -1209,7 +1209,7 @@ class GPFSDriver(driver.ConsistencyGroupVD, driver.ExtendVD,
for snapshot in snapshots:
self.create_snapshot(snapshot)
snapshot['status'] = 'available'
snapshot['status'] = fields.SnapshotStatus.AVAILABLE
model_update = {'status': 'available'}
@ -1222,7 +1222,7 @@ class GPFSDriver(driver.ConsistencyGroupVD, driver.ExtendVD,
for snapshot in snapshots:
self.delete_snapshot(snapshot)
snapshot['status'] = 'deleted'
snapshot['status'] = fields.SnapshotStatus.DELETED
model_update = {'status': cgsnapshot['status']}

View File

@ -934,7 +934,7 @@ class DPLCOMMONDriver(driver.ConsistencyGroupVD, driver.ExtendVD,
cgsnapshot.get('description', ''),
True)
for snapshot in snapshots:
snapshot.status = 'available'
snapshot.status = fields.SnapshotStatus.AVAILABLE
except Exception as e:
msg = _('Failed to create cg snapshot %(id)s '
'due to %(reason)s.') % {'id': cgsnapshot['id'],
@ -960,7 +960,7 @@ class DPLCOMMONDriver(driver.ConsistencyGroupVD, driver.ExtendVD,
self._conver_uuid2hex(cgsnapshot['consistencygroup_id']),
self._conver_uuid2hex(cgsnapshot['id']), True)
for snapshot in snapshots:
snapshot.status = 'deleted'
snapshot.status = fields.SnapshotStatus.DELETED
except Exception as e:
msg = _('Failed to delete cgsnapshot %(id)s due to '
'%(reason)s.') % {'id': cgsnapshot['id'],

View File

@ -30,6 +30,7 @@ import six
from cinder import compute
from cinder import db
from cinder import exception
from cinder.objects import fields
from cinder import utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
@ -1301,20 +1302,21 @@ class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD):
{'id': snapshot['id'],
'status': s['status']})
if s['status'] == 'creating':
if s['status'] == fields.SnapshotStatus.CREATING:
if s['progress'] == '90%':
# Nova tasks completed successfully
break
time.sleep(increment)
seconds_elapsed += increment
elif s['status'] == 'error':
elif s['status'] == fields.SnapshotStatus.ERROR:
msg = _('Nova returned "error" status '
'while creating snapshot.')
raise exception.RemoteFSException(msg)
elif s['status'] == 'deleting' or s['status'] == 'error_deleting':
elif (s['status'] == fields.SnapshotStatus.DELETING or
s['status'] == fields.SnapshotStatus.ERROR_DELETING):
msg = _('Snapshot %(id)s has been asked to be deleted while '
'waiting for it to become available. Perhaps a '
'concurrent request was made.') % {'id':
@ -1389,7 +1391,7 @@ class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD):
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'deleting':
if s['status'] == fields.SnapshotStatus.DELETING:
if s['progress'] == '90%':
# Nova tasks completed successfully
break

View File

@ -23,6 +23,7 @@ from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LW
from cinder import objects
from cinder.objects import fields
from cinder import policy
from cinder import quota
from cinder import utils
@ -40,7 +41,7 @@ QUOTAS = quota.QUOTAS
# Only in these 'sources' status can we attempt to create a volume from a
# source volume or a source snapshot, other status states we can not create
# from, 'error' being the common example.
SNAPSHOT_PROCEED_STATUS = ('available',)
SNAPSHOT_PROCEED_STATUS = (fields.SnapshotStatus.AVAILABLE,)
SRC_VOL_PROCEED_STATUS = ('available', 'in-use',)
REPLICA_PROCEED_STATUS = ('active', 'active-stopped',)
CG_PROCEED_STATUS = ('available', 'creating',)

View File

@ -89,7 +89,7 @@ VALID_REMOVE_VOL_FROM_CG_STATUS = (
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = ('available',)
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
volume_manager_opts = [
@ -480,11 +480,11 @@ class VolumeManager(manager.SchedulerDependentManager):
else:
pass
snapshots = objects.SnapshotList.get_by_host(
ctxt, self.host, {'status': 'creating'})
ctxt, self.host, {'status': fields.SnapshotStatus.CREATING})
for snapshot in snapshots:
LOG.warning(_LW("Detected snapshot stuck in creating "
"status, setting to ERROR."), resource=snapshot)
snapshot.status = 'error'
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
@ -848,7 +848,7 @@ class VolumeManager(manager.SchedulerDependentManager):
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = 'error'
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
vol_ref = self.db.volume_get(context, volume_id)
@ -866,11 +866,11 @@ class VolumeManager(manager.SchedulerDependentManager):
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id}, resource=snapshot)
snapshot.status = 'error'
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = 'available'
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
@ -907,12 +907,12 @@ class VolumeManager(manager.SchedulerDependentManager):
except exception.SnapshotIsBusy:
LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
resource=snapshot)
snapshot.status = 'available'
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
return
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = 'error_deleting'
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
# Get reservations
@ -3005,7 +3005,9 @@ class VolumeManager(manager.SchedulerDependentManager):
snap_model['id'],
snap_model)
if (snap_model['status'] in ['error_deleting', 'error'] and
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
@ -3028,7 +3030,7 @@ class VolumeManager(manager.SchedulerDependentManager):
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = 'error'
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
@ -3048,13 +3050,15 @@ class VolumeManager(manager.SchedulerDependentManager):
# TODO(thangp): Switch over to use snapshot.update()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_update(context, snapshot_id,
{'status': 'error'})
self.db.snapshot_update(
context, snapshot_id, {
'status': fields.SnapshotStatus.ERROR})
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'], {'status': 'available',
snapshot['id'],
{'status': fields.SnapshotStatus.AVAILABLE,
'progress': '100%'})
cgsnapshot.status = 'available'
@ -3108,7 +3112,9 @@ class VolumeManager(manager.SchedulerDependentManager):
snap.status = snap_model['status']
snap.save()
if (snap_model['status'] in ['error_deleting', 'error'] and
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
@ -3131,7 +3137,7 @@ class VolumeManager(manager.SchedulerDependentManager):
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = 'error'
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots: