Make pep8 checks a bit stricter.
Along with moving to pep8 1.3.3, we also want to standardize on what we ignore. This patch gets us most of the way there by setting the ignore list to: N4, E125, E126, E711, E712. Almost all changes made here are whitespace/indentation changes. The removal of Hacking N4 errors from the ignore list will be handled in a separate patch.

Change-Id: If45f156600485d23769449018590f60b4f69b0c5
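For reference only, a minimal sketch of how an ignore list like this is typically passed to the pep8 tool from tox; the environment name, version pin, and exact flags below are assumptions for illustration and are not taken from this change:

    # Hypothetical tox.ini excerpt -- not part of this commit.
    [testenv:pep8]
    deps = pep8==1.3.3
    # Everything outside this list is now enforced; the Hacking N4* checks
    # stay ignored until the follow-up patch mentioned above.
    commands = pep8 --repeat --ignore=N4,E125,E126,E711,E712 .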
parent 0fb98971a7
commit 51418bdd5b
@@ -127,8 +127,9 @@ class ShellCommands(object):
 Falls back to Python shell if unavailable"""
 self.run('python')

-@args('--shell', dest="shell", metavar='<bpython|ipython|python >',
-help='Python shell')
+@args('--shell', dest="shell",
+metavar='<bpython|ipython|python >',
+help='Python shell')
 def run(self, shell=None):
 """Runs a Python interactive interpreter."""
 if not shell:
@@ -180,7 +181,7 @@ def _db_error(caught_exception):


 class HostCommands(object):
-"""List hosts"""
+"""List hosts."""

 def list(self, zone=None):
 """Show a list of all physical hosts. Filter by zone.
@@ -206,8 +207,9 @@ class DbCommands(object):
 def __init__(self):
 pass

-@args('--version', dest='version', metavar='<version>',
-help='Database version')
+@args('--version', dest='version',
+metavar='<version>',
+help='Database version')
 def sync(self, version=None):
 """Sync the database up to the most recent version."""
 return migration.db_sync(version)
@@ -224,9 +226,10 @@ class VersionCommands(object):
 pass

 def list(self):
-print _("%(version)s (%(vcs)s)") % \
-{'version': version.version_string(),
-'vcs': version.version_string_with_vcs()}
+print(
+_("%(version)s (%(vcs)s)") %
+{'version': version.version_string(),
+'vcs': version.version_string_with_vcs()})

 def __call__(self):
 self.list()
@@ -313,7 +316,7 @@ class ImportCommands(object):
 columns = table.columns.keys()
 for row in src.query(table).all():
 data = dict([(str(column), getattr(row, column))
 for column in columns])
 dest.add(new_row(**data))
 dest.commit()

@@ -325,7 +328,7 @@ class ImportCommands(object):
 for row in src.query(table).all():
 if row.resource == 'gigabytes' or row.resource == 'volumes':
 data = dict([(str(column), getattr(row, column))
 for column in columns])
 dest.add(new_row(**data))
 dest.commit()

@@ -352,10 +355,14 @@ class ImportCommands(object):
 dest_db = '%s/cinder' % dest_db
 self._import_db(src_db, dest_db, backup_db)

-@args('--src', dest='src_tgts', metavar='<src tgts>',
-help='[login@src_host:]/opt/stack/nova/volumes/')
-@args('--dest', dest='dest_tgts', metavar='<dest tgts>',
-help='[login@src_host:/opt/stack/cinder/volumes/]')
+@args('--src',
+dest='src_tgts',
+metavar='<src tgts>',
+help='[login@src_host:]/opt/stack/nova/volumes/')
+@args('--dest',
+dest='dest_tgts',
+metavar='<dest tgts>',
+help='[login@src_host:/opt/stack/cinder/volumes/]')
 def copy_ptgt_files(self, src_tgts, dest_tgts=None):
 """Copy persistent scsi tgt files from nova to cinder.

@@ -380,10 +387,12 @@ class ImportCommands(object):


 class VolumeCommands(object):
-"""Methods for dealing with a cloud in an odd state"""
+"""Methods for dealing with a cloud in an odd state."""

-@args('--volume', dest='volume_id', metavar='<volume id>',
-help='Volume ID')
+@args('--volume',
+dest='volume_id',
+metavar='<volume id>',
+help='Volume ID')
 def delete(self, volume_id):
 """Delete a volume, bypassing the check that it
 must be available."""
@@ -407,8 +416,10 @@ class VolumeCommands(object):
 {"method": "delete_volume",
 "args": {"volume_id": volume['id']}})

-@args('--volume', dest='volume_id', metavar='<volume id>',
-help='Volume ID')
+@args('--volume',
+dest='volume_id',
+metavar='<volume id>',
+help='Volume ID')
 def reattach(self, volume_id):
 """Re-attach a volume that has previously been attached
 to an instance. Typically called after a compute host
@@ -429,7 +440,7 @@ class VolumeCommands(object):


 class StorageManagerCommands(object):
-"""Class for mangaging Storage Backends and Flavors"""
+"""Class for mangaging Storage Backends and Flavors."""

 def flavor_list(self, flavor=None):
 ctxt = context.get_admin_context()
@@ -449,9 +460,9 @@ class StorageManagerCommands(object):

 for flav in flavors:
 print "%-18s\t%-20s\t%s" % (
 flav['id'],
 flav['label'],
 flav['description'])

 def flavor_create(self, label, desc):
 # TODO(renukaapte) flavor name must be unique
@@ -487,10 +498,10 @@ class StorageManagerCommands(object):
 sys.exit(2)

 print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'),
 _('Flavor id'),
 _('SR UUID'),
 _('SR Type'),
 _('Config Parameters'),)

 for b in backends:
 print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'],
@@ -516,8 +527,8 @@ class StorageManagerCommands(object):
 print "error: %s" % ex
 sys.exit(2)

-config_params = " ".join(['%s=%s' %
-(key, params[key]) for key in params])
+config_params = " ".join(
+['%s=%s' % (key, params[key]) for key in params])

 if 'sr_uuid' in params:
 sr_uuid = params['sr_uuid']
@@ -532,11 +543,12 @@ class StorageManagerCommands(object):
 c = raw_input('Proceed? (y/n) ')
 if c == 'y' or c == 'Y':
 try:
-db.sm_backend_conf_update(ctxt, backend['id'],
-dict(created=False,
-flavor_id=flavors['id'],
-sr_type=sr_type,
-config_params=config_params))
+db.sm_backend_conf_update(
+ctxt, backend['id'],
+dict(created=False,
+flavor_id=flavors['id'],
+sr_type=sr_type,
+config_params=config_params))
 except exception.DBError, e:
 _db_error(e)
 return
@@ -578,10 +590,10 @@ class ConfigCommands(object):


 class GetLogCommands(object):
-"""Get logging information"""
+"""Get logging information."""

 def errors(self):
-"""Get all of the errors from the log files"""
+"""Get all of the errors from the log files."""
 error_found = 0
 if FLAGS.logdir:
 logs = [x for x in os.listdir(FLAGS.logdir) if x.endswith('.log')]
@@ -601,7 +613,7 @@ class GetLogCommands(object):
 print "No errors in logfiles!"

 def syslog(self, num_entries=10):
-"""Get <num_entries> of the cinder syslog events"""
+"""Get <num_entries> of the cinder syslog events."""
 entries = int(num_entries)
 count = 0
 log_file = ''
@@ -692,8 +704,8 @@ def main():
 script_name = argv.pop(0)
 if len(argv) < 1:
 print _("\nOpenStack Cinder version: %(version)s (%(vcs)s)\n") % \
 {'version': version.version_string(),
 'vcs': version.version_string_with_vcs()}
 print script_name + " category action [<args>]"
 print _("Available categories:")
 for k, _v in CATEGORIES:
@ -181,14 +181,10 @@ class ViewBuilder(object):
|
|||||||
_collection_name = None
|
_collection_name = None
|
||||||
|
|
||||||
def _get_links(self, request, identifier):
|
def _get_links(self, request, identifier):
|
||||||
return [{
|
return [{"rel": "self",
|
||||||
"rel": "self",
|
"href": self._get_href_link(request, identifier), },
|
||||||
"href": self._get_href_link(request, identifier),
|
{"rel": "bookmark",
|
||||||
},
|
"href": self._get_bookmark_link(request, identifier), }]
|
||||||
{
|
|
||||||
"rel": "bookmark",
|
|
||||||
"href": self._get_bookmark_link(request, identifier),
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _get_next_link(self, request, identifier):
|
def _get_next_link(self, request, identifier):
|
||||||
"""Return href string with proper limit and marker params."""
|
"""Return href string with proper limit and marker params."""
|
||||||
|
@ -27,14 +27,15 @@ from cinder import volume
|
|||||||
|
|
||||||
FLAGS = flags.FLAGS
|
FLAGS = flags.FLAGS
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
authorize = extensions.soft_extension_authorizer('volume',
|
authorize = extensions.soft_extension_authorizer(
|
||||||
'extended_snapshot_attributes')
|
'volume',
|
||||||
|
'extended_snapshot_attributes')
|
||||||
|
|
||||||
|
|
||||||
class ExtendedSnapshotAttributesController(wsgi.Controller):
|
class ExtendedSnapshotAttributesController(wsgi.Controller):
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
super(ExtendedSnapshotAttributesController, self).__init__(*args,
|
super(ExtendedSnapshotAttributesController, self).__init__(*args,
|
||||||
**kwargs)
|
**kwargs)
|
||||||
self.volume_api = volume.API()
|
self.volume_api = volume.API()
|
||||||
|
|
||||||
def _get_snapshots(self, context):
|
def _get_snapshots(self, context):
|
||||||
|
@ -63,10 +63,8 @@ class QuotaClassSetsController(object):
|
|||||||
except exception.NotAuthorized:
|
except exception.NotAuthorized:
|
||||||
raise webob.exc.HTTPForbidden()
|
raise webob.exc.HTTPForbidden()
|
||||||
|
|
||||||
return self._format_quota_set(
|
return self._format_quota_set(id,
|
||||||
id,
|
QUOTAS.get_class_quotas(context, id))
|
||||||
QUOTAS.get_class_quotas(context, id)
|
|
||||||
)
|
|
||||||
|
|
||||||
@wsgi.serializers(xml=QuotaClassTemplate)
|
@wsgi.serializers(xml=QuotaClassTemplate)
|
||||||
def update(self, req, id, body):
|
def update(self, req, id, body):
|
||||||
|
@ -118,8 +118,8 @@ class Quotas(extensions.ExtensionDescriptor):
|
|||||||
resources = []
|
resources = []
|
||||||
|
|
||||||
res = extensions.ResourceExtension('os-quota-sets',
|
res = extensions.ResourceExtension('os-quota-sets',
|
||||||
QuotaSetsController(),
|
QuotaSetsController(),
|
||||||
member_actions={'defaults': 'GET'})
|
member_actions={'defaults': 'GET'})
|
||||||
resources.append(res)
|
resources.append(res)
|
||||||
|
|
||||||
return resources
|
return resources
|
||||||
|
@ -140,10 +140,10 @@ class Types_extra_specs(extensions.ExtensionDescriptor):
|
|||||||
def get_resources(self):
|
def get_resources(self):
|
||||||
resources = []
|
resources = []
|
||||||
res = extensions.ResourceExtension('extra_specs',
|
res = extensions.ResourceExtension('extra_specs',
|
||||||
VolumeTypeExtraSpecsController(),
|
VolumeTypeExtraSpecsController(),
|
||||||
parent=dict(
|
parent=dict(member_name='type',
|
||||||
member_name='type',
|
collection_name='types')
|
||||||
collection_name='types'))
|
)
|
||||||
resources.append(res)
|
resources.append(res)
|
||||||
|
|
||||||
return resources
|
return resources
|
||||||
|
@ -31,7 +31,7 @@ authorize = extensions.extension_authorizer('volume', 'types_manage')
|
|||||||
|
|
||||||
|
|
||||||
class VolumeTypesManageController(wsgi.Controller):
|
class VolumeTypesManageController(wsgi.Controller):
|
||||||
""" The volume types API controller for the OpenStack API """
|
"""The volume types API controller for the OpenStack API."""
|
||||||
|
|
||||||
_view_builder_class = views_types.ViewBuilder
|
_view_builder_class = views_types.ViewBuilder
|
||||||
|
|
||||||
@ -64,7 +64,7 @@ class VolumeTypesManageController(wsgi.Controller):
|
|||||||
|
|
||||||
@wsgi.action("delete")
|
@wsgi.action("delete")
|
||||||
def _delete(self, req, id):
|
def _delete(self, req, id):
|
||||||
""" Deletes an existing volume type """
|
"""Deletes an existing volume type."""
|
||||||
context = req.environ['cinder.context']
|
context = req.environ['cinder.context']
|
||||||
authorize(context)
|
authorize(context)
|
||||||
|
|
||||||
@ -78,7 +78,7 @@ class VolumeTypesManageController(wsgi.Controller):
|
|||||||
|
|
||||||
|
|
||||||
class Types_manage(extensions.ExtensionDescriptor):
|
class Types_manage(extensions.ExtensionDescriptor):
|
||||||
"""Types manage support"""
|
"""Types manage support."""
|
||||||
|
|
||||||
name = "TypesManage"
|
name = "TypesManage"
|
||||||
alias = "os-types-manage"
|
alias = "os-types-manage"
|
||||||
|
@ -52,7 +52,7 @@ class VolumeToImageSerializer(xmlutil.TemplateBuilder):
|
|||||||
|
|
||||||
|
|
||||||
class VolumeToImageDeserializer(wsgi.XMLDeserializer):
|
class VolumeToImageDeserializer(wsgi.XMLDeserializer):
|
||||||
"""Deserializer to handle xml-formatted requests"""
|
"""Deserializer to handle xml-formatted requests."""
|
||||||
def default(self, string):
|
def default(self, string):
|
||||||
dom = minidom.parseString(string)
|
dom = minidom.parseString(string)
|
||||||
action_node = dom.childNodes[0]
|
action_node = dom.childNodes[0]
|
||||||
|
@ -31,10 +31,11 @@ from cinder.openstack.common import log as logging
|
|||||||
from cinder import wsgi as base_wsgi
|
from cinder import wsgi as base_wsgi
|
||||||
|
|
||||||
|
|
||||||
use_forwarded_for_opt = cfg.BoolOpt('use_forwarded_for',
|
use_forwarded_for_opt = cfg.BoolOpt(
|
||||||
default=False,
|
'use_forwarded_for',
|
||||||
help='Treat X-Forwarded-For as the canonical remote address. '
|
default=False,
|
||||||
'Only enable this if you have a sanitizing proxy.')
|
help='Treat X-Forwarded-For as the canonical remote address. '
|
||||||
|
'Only enable this if you have a sanitizing proxy.')
|
||||||
|
|
||||||
FLAGS = flags.FLAGS
|
FLAGS = flags.FLAGS
|
||||||
FLAGS.register_opt(use_forwarded_for_opt)
|
FLAGS.register_opt(use_forwarded_for_opt)
|
||||||
|
@ -39,7 +39,7 @@ class FaultWrapper(base_wsgi.Middleware):
|
|||||||
for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
|
for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
|
||||||
FaultWrapper._status_to_type[clazz.code] = clazz
|
FaultWrapper._status_to_type[clazz.code] = clazz
|
||||||
return FaultWrapper._status_to_type.get(
|
return FaultWrapper._status_to_type.get(
|
||||||
status, webob.exc.HTTPInternalServerError)()
|
status, webob.exc.HTTPInternalServerError)()
|
||||||
|
|
||||||
def _error(self, inner, req):
|
def _error(self, inner, req):
|
||||||
LOG.exception(_("Caught error: %s"), unicode(inner))
|
LOG.exception(_("Caught error: %s"), unicode(inner))
|
||||||
|
@ -49,9 +49,10 @@ class ProjectMapper(APIMapper):
|
|||||||
p_member = parent_resource['member_name']
|
p_member = parent_resource['member_name']
|
||||||
kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
|
kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
|
||||||
p_member)
|
p_member)
|
||||||
routes.Mapper.resource(self, member_name,
|
routes.Mapper.resource(self,
|
||||||
collection_name,
|
member_name,
|
||||||
**kwargs)
|
collection_name,
|
||||||
|
**kwargs)
|
||||||
|
|
||||||
|
|
||||||
class APIRouter(base_wsgi.Router):
|
class APIRouter(base_wsgi.Router):
|
||||||
|
@ -16,19 +16,19 @@
|
|||||||
"""The hosts admin extension."""
|
"""The hosts admin extension."""
|
||||||
|
|
||||||
import webob.exc
|
import webob.exc
|
||||||
from xml.dom import minidom
|
|
||||||
from xml.parsers import expat
|
|
||||||
|
|
||||||
from cinder.api.openstack import extensions
|
from cinder.api.openstack import extensions
|
||||||
from cinder.api.openstack import wsgi
|
from cinder.api.openstack import wsgi
|
||||||
from cinder.api.openstack import xmlutil
|
from cinder.api.openstack import xmlutil
|
||||||
from cinder.volume import api as volume_api
|
|
||||||
from cinder import db
|
from cinder import db
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
from cinder import flags
|
from cinder import flags
|
||||||
from cinder.openstack.common import log as logging
|
from cinder.openstack.common import log as logging
|
||||||
from cinder.openstack.common import timeutils
|
from cinder.openstack.common import timeutils
|
||||||
from cinder import utils
|
from cinder import utils
|
||||||
|
from cinder.volume import api as volume_api
|
||||||
|
from xml.dom import minidom
|
||||||
|
from xml.parsers import expat
|
||||||
|
|
||||||
FLAGS = flags.FLAGS
|
FLAGS = flags.FLAGS
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -176,8 +176,9 @@ class HostController(object):
|
|||||||
context = req.environ['cinder.context']
|
context = req.environ['cinder.context']
|
||||||
state = "enabled" if enabled else "disabled"
|
state = "enabled" if enabled else "disabled"
|
||||||
LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
|
LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
|
||||||
result = self.api.set_host_enabled(context, host=host,
|
result = self.api.set_host_enabled(context,
|
||||||
enabled=enabled)
|
host=host,
|
||||||
|
enabled=enabled)
|
||||||
if result not in ("enabled", "disabled"):
|
if result not in ("enabled", "disabled"):
|
||||||
# An error message was returned
|
# An error message was returned
|
||||||
raise webob.exc.HTTPBadRequest(explanation=result)
|
raise webob.exc.HTTPBadRequest(explanation=result)
|
||||||
@ -230,13 +231,14 @@ class HostController(object):
|
|||||||
(snap_count, snap_sum) = db.snapshot_data_get_for_project(
|
(snap_count, snap_sum) = db.snapshot_data_get_for_project(
|
||||||
context,
|
context,
|
||||||
project_id)
|
project_id)
|
||||||
resources.append({'resource':
|
resources.append(
|
||||||
{'host': host,
|
{'resource':
|
||||||
'project': project_id,
|
{'host': host,
|
||||||
'volume_count': str(count),
|
'project': project_id,
|
||||||
'total_volume_gb': str(sum),
|
'volume_count': str(count),
|
||||||
'snapshot_count': str(snap_count),
|
'total_volume_gb': str(sum),
|
||||||
'total_snapshot_gb': str(snap_sum)}})
|
'snapshot_count': str(snap_count),
|
||||||
|
'total_snapshot_gb': str(snap_sum)}})
|
||||||
snap_count_total += int(snap_count)
|
snap_count_total += int(snap_count)
|
||||||
snap_sum_total += int(snap_sum)
|
snap_sum_total += int(snap_sum)
|
||||||
resources[0]['resource']['snapshot_count'] = str(snap_count_total)
|
resources[0]['resource']['snapshot_count'] = str(snap_count_total)
|
||||||
@ -254,8 +256,11 @@ class Hosts(extensions.ExtensionDescriptor):
|
|||||||
|
|
||||||
def get_resources(self):
|
def get_resources(self):
|
||||||
resources = [extensions.ResourceExtension('os-hosts',
|
resources = [extensions.ResourceExtension('os-hosts',
|
||||||
HostController(),
|
HostController(),
|
||||||
collection_actions={'update': 'PUT'},
|
collection_actions={
|
||||||
member_actions={"startup": "GET", "shutdown": "GET",
|
'update': 'PUT'},
|
||||||
"reboot": "GET"})]
|
member_actions={
|
||||||
|
'startup': 'GET',
|
||||||
|
'shutdown': 'GET',
|
||||||
|
'reboot': 'GET'})]
|
||||||
return resources
|
return resources
|
||||||
|
@ -79,7 +79,7 @@ class Request(webob.Request):
|
|||||||
content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
|
content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
|
||||||
|
|
||||||
self.environ['cinder.best_content_type'] = (content_type or
|
self.environ['cinder.best_content_type'] = (content_type or
|
||||||
'application/json')
|
'application/json')
|
||||||
|
|
||||||
return self.environ['cinder.best_content_type']
|
return self.environ['cinder.best_content_type']
|
||||||
|
|
||||||
@ -577,8 +577,9 @@ class ResourceExceptionHandler(object):
|
|||||||
code=ex_value.code, explanation=unicode(ex_value)))
|
code=ex_value.code, explanation=unicode(ex_value)))
|
||||||
elif isinstance(ex_value, TypeError):
|
elif isinstance(ex_value, TypeError):
|
||||||
exc_info = (ex_type, ex_value, ex_traceback)
|
exc_info = (ex_type, ex_value, ex_traceback)
|
||||||
LOG.error(_('Exception handling resource: %s') % ex_value,
|
LOG.error(_(
|
||||||
exc_info=exc_info)
|
'Exception handling resource: %s') %
|
||||||
|
ex_value, exc_info=exc_info)
|
||||||
raise Fault(webob.exc.HTTPBadRequest())
|
raise Fault(webob.exc.HTTPBadRequest())
|
||||||
elif isinstance(ex_value, Fault):
|
elif isinstance(ex_value, Fault):
|
||||||
LOG.info(_("Fault thrown: %s"), unicode(ex_value))
|
LOG.info(_("Fault thrown: %s"), unicode(ex_value))
|
||||||
@ -901,7 +902,7 @@ class Resource(wsgi.Application):
|
|||||||
meth = getattr(self.controller, action)
|
meth = getattr(self.controller, action)
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
if (not self.wsgi_actions or
|
if (not self.wsgi_actions or
|
||||||
action not in ['action', 'create', 'delete']):
|
action not in ['action', 'create', 'delete']):
|
||||||
# Propagate the error
|
# Propagate the error
|
||||||
raise
|
raise
|
||||||
else:
|
else:
|
||||||
@ -1038,17 +1039,16 @@ class Controller(object):
|
|||||||
class Fault(webob.exc.HTTPException):
|
class Fault(webob.exc.HTTPException):
|
||||||
"""Wrap webob.exc.HTTPException to provide API friendly response."""
|
"""Wrap webob.exc.HTTPException to provide API friendly response."""
|
||||||
|
|
||||||
_fault_names = {
|
_fault_names = {400: "badRequest",
|
||||||
400: "badRequest",
|
401: "unauthorized",
|
||||||
401: "unauthorized",
|
403: "forbidden",
|
||||||
403: "forbidden",
|
404: "itemNotFound",
|
||||||
404: "itemNotFound",
|
405: "badMethod",
|
||||||
405: "badMethod",
|
409: "conflictingRequest",
|
||||||
409: "conflictingRequest",
|
413: "overLimit",
|
||||||
413: "overLimit",
|
415: "badMediaType",
|
||||||
415: "badMediaType",
|
501: "notImplemented",
|
||||||
501: "notImplemented",
|
503: "serviceUnavailable"}
|
||||||
503: "serviceUnavailable"}
|
|
||||||
|
|
||||||
def __init__(self, exception):
|
def __init__(self, exception):
|
||||||
"""Create a Fault for the given webob.exc.exception."""
|
"""Create a Fault for the given webob.exc.exception."""
|
||||||
|
@ -24,8 +24,9 @@ from cinder.openstack.common import log as logging
|
|||||||
|
|
||||||
|
|
||||||
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
|
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
|
||||||
_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*'
|
_option_header_piece_re = re.compile(
|
||||||
r'(?:=\s*([^;]+|%s))?\s*' %
|
r';\s*([^\s;=]+|%s)\s*'
|
||||||
|
r'(?:=\s*([^;]+|%s))?\s*' %
|
||||||
(_quoted_string_re, _quoted_string_re))
|
(_quoted_string_re, _quoted_string_re))
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -171,8 +172,7 @@ class URLMap(paste.urlmap.URLMap):
|
|||||||
for (domain, app_url), app in self.applications:
|
for (domain, app_url), app in self.applications:
|
||||||
if domain and domain != host and domain != host + ':' + port:
|
if domain and domain != host and domain != host + ':' + port:
|
||||||
continue
|
continue
|
||||||
if (path_info == app_url
|
if (path_info == app_url or path_info.startswith(app_url + '/')):
|
||||||
or path_info.startswith(app_url + '/')):
|
|
||||||
return app, app_url
|
return app, app_url
|
||||||
|
|
||||||
return None, None
|
return None, None
|
||||||
@ -274,7 +274,7 @@ class URLMap(paste.urlmap.URLMap):
|
|||||||
|
|
||||||
if not mime_type or not app:
|
if not mime_type or not app:
|
||||||
possible_mime_type, possible_app = self._accept_strategy(
|
possible_mime_type, possible_app = self._accept_strategy(
|
||||||
host, port, environ, supported_content_types)
|
host, port, environ, supported_content_types)
|
||||||
if possible_mime_type and not mime_type:
|
if possible_mime_type and not mime_type:
|
||||||
mime_type = possible_mime_type
|
mime_type = possible_mime_type
|
||||||
if possible_app and not app:
|
if possible_app and not app:
|
||||||
|
@ -44,8 +44,8 @@ class APIRouter(cinder.api.openstack.APIRouter):
|
|||||||
def _setup_routes(self, mapper, ext_mgr):
|
def _setup_routes(self, mapper, ext_mgr):
|
||||||
self.resources['versions'] = versions.create_resource()
|
self.resources['versions'] = versions.create_resource()
|
||||||
mapper.connect("versions", "/",
|
mapper.connect("versions", "/",
|
||||||
controller=self.resources['versions'],
|
controller=self.resources['versions'],
|
||||||
action='show')
|
action='show')
|
||||||
|
|
||||||
mapper.redirect("", "/")
|
mapper.redirect("", "/")
|
||||||
|
|
||||||
|
@ -164,15 +164,17 @@ class SnapshotsController(wsgi.Controller):
|
|||||||
raise exception.InvalidParameterValue(err=msg)
|
raise exception.InvalidParameterValue(err=msg)
|
||||||
|
|
||||||
if utils.bool_from_str(force):
|
if utils.bool_from_str(force):
|
||||||
new_snapshot = self.volume_api.create_snapshot_force(context,
|
new_snapshot = self.volume_api.create_snapshot_force(
|
||||||
volume,
|
context,
|
||||||
snapshot.get('display_name'),
|
volume,
|
||||||
snapshot.get('display_description'))
|
snapshot.get('display_name'),
|
||||||
|
snapshot.get('display_description'))
|
||||||
else:
|
else:
|
||||||
new_snapshot = self.volume_api.create_snapshot(context,
|
new_snapshot = self.volume_api.create_snapshot(
|
||||||
volume,
|
context,
|
||||||
snapshot.get('display_name'),
|
volume,
|
||||||
snapshot.get('display_description'))
|
snapshot.get('display_name'),
|
||||||
|
snapshot.get('display_description'))
|
||||||
|
|
||||||
retval = _translate_snapshot_detail_view(context, new_snapshot)
|
retval = _translate_snapshot_detail_view(context, new_snapshot)
|
||||||
|
|
||||||
|
@@ -15,7 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-""" The volume type & volume types extra specs extension"""
+"""The volume type & volume types extra specs extension."""

 from webob import exc

@ -50,20 +50,20 @@ class VolumeTypesTemplate(xmlutil.TemplateBuilder):
|
|||||||
|
|
||||||
|
|
||||||
class VolumeTypesController(wsgi.Controller):
|
class VolumeTypesController(wsgi.Controller):
|
||||||
""" The volume types API controller for the OpenStack API """
|
"""The volume types API controller for the OpenStack API."""
|
||||||
|
|
||||||
_view_builder_class = views_types.ViewBuilder
|
_view_builder_class = views_types.ViewBuilder
|
||||||
|
|
||||||
@wsgi.serializers(xml=VolumeTypesTemplate)
|
@wsgi.serializers(xml=VolumeTypesTemplate)
|
||||||
def index(self, req):
|
def index(self, req):
|
||||||
""" Returns the list of volume types """
|
"""Returns the list of volume types."""
|
||||||
context = req.environ['cinder.context']
|
context = req.environ['cinder.context']
|
||||||
vol_types = volume_types.get_all_types(context).values()
|
vol_types = volume_types.get_all_types(context).values()
|
||||||
return self._view_builder.index(req, vol_types)
|
return self._view_builder.index(req, vol_types)
|
||||||
|
|
||||||
@wsgi.serializers(xml=VolumeTypeTemplate)
|
@wsgi.serializers(xml=VolumeTypeTemplate)
|
||||||
def show(self, req, id):
|
def show(self, req, id):
|
||||||
""" Return a single volume type item """
|
"""Return a single volume type item."""
|
||||||
context = req.environ['cinder.context']
|
context = req.environ['cinder.context']
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
@ -296,7 +296,7 @@ class VolumeController(wsgi.Controller):
|
|||||||
if req_volume_type:
|
if req_volume_type:
|
||||||
try:
|
try:
|
||||||
kwargs['volume_type'] = volume_types.get_volume_type_by_name(
|
kwargs['volume_type'] = volume_types.get_volume_type_by_name(
|
||||||
context, req_volume_type)
|
context, req_volume_type)
|
||||||
except exception.VolumeTypeNotFound:
|
except exception.VolumeTypeNotFound:
|
||||||
explanation = 'Volume type not found.'
|
explanation = 'Volume type not found.'
|
||||||
raise exc.HTTPNotFound(explanation=explanation)
|
raise exc.HTTPNotFound(explanation=explanation)
|
||||||
@ -394,7 +394,7 @@ def remove_invalid_options(context, search_options, allowed_search_options):
|
|||||||
return
|
return
|
||||||
# Otherwise, strip out all unknown options
|
# Otherwise, strip out all unknown options
|
||||||
unknown_options = [opt for opt in search_options
|
unknown_options = [opt for opt in search_options
|
||||||
if opt not in allowed_search_options]
|
if opt not in allowed_search_options]
|
||||||
bad_options = ", ".join(unknown_options)
|
bad_options = ", ".join(unknown_options)
|
||||||
log_msg = _("Removing options '%(bad_options)s' from query") % locals()
|
log_msg = _("Removing options '%(bad_options)s' from query") % locals()
|
||||||
LOG.debug(log_msg)
|
LOG.debug(log_msg)
|
||||||
|
@ -44,8 +44,8 @@ class APIRouter(cinder.api.openstack.APIRouter):
|
|||||||
def _setup_routes(self, mapper, ext_mgr):
|
def _setup_routes(self, mapper, ext_mgr):
|
||||||
self.resources['versions'] = versions.create_resource()
|
self.resources['versions'] = versions.create_resource()
|
||||||
mapper.connect("versions", "/",
|
mapper.connect("versions", "/",
|
||||||
controller=self.resources['versions'],
|
controller=self.resources['versions'],
|
||||||
action='show')
|
action='show')
|
||||||
|
|
||||||
mapper.redirect("", "/")
|
mapper.redirect("", "/")
|
||||||
|
|
||||||
|
@ -164,15 +164,17 @@ class SnapshotsController(wsgi.Controller):
|
|||||||
raise exception.InvalidParameterValue(err=msg)
|
raise exception.InvalidParameterValue(err=msg)
|
||||||
|
|
||||||
if utils.bool_from_str(force):
|
if utils.bool_from_str(force):
|
||||||
new_snapshot = self.volume_api.create_snapshot_force(context,
|
new_snapshot = self.volume_api.create_snapshot_force(
|
||||||
volume,
|
context,
|
||||||
snapshot.get('display_name'),
|
volume,
|
||||||
snapshot.get('display_description'))
|
snapshot.get('display_name'),
|
||||||
|
snapshot.get('display_description'))
|
||||||
else:
|
else:
|
||||||
new_snapshot = self.volume_api.create_snapshot(context,
|
new_snapshot = self.volume_api.create_snapshot(
|
||||||
volume,
|
context,
|
||||||
snapshot.get('display_name'),
|
volume,
|
||||||
snapshot.get('display_description'))
|
snapshot.get('display_name'),
|
||||||
|
snapshot.get('display_description'))
|
||||||
|
|
||||||
retval = _translate_snapshot_detail_view(context, new_snapshot)
|
retval = _translate_snapshot_detail_view(context, new_snapshot)
|
||||||
|
|
||||||
|
@ -15,7 +15,7 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
""" The volume type & volume types extra specs extension"""
|
"""The volume type & volume types extra specs extension."""
|
||||||
|
|
||||||
from webob import exc
|
from webob import exc
|
||||||
|
|
||||||
@ -50,20 +50,20 @@ class VolumeTypesTemplate(xmlutil.TemplateBuilder):
|
|||||||
|
|
||||||
|
|
||||||
class VolumeTypesController(wsgi.Controller):
|
class VolumeTypesController(wsgi.Controller):
|
||||||
""" The volume types API controller for the OpenStack API """
|
"""The volume types API controller for the OpenStack API."""
|
||||||
|
|
||||||
_view_builder_class = views_types.ViewBuilder
|
_view_builder_class = views_types.ViewBuilder
|
||||||
|
|
||||||
@wsgi.serializers(xml=VolumeTypesTemplate)
|
@wsgi.serializers(xml=VolumeTypesTemplate)
|
||||||
def index(self, req):
|
def index(self, req):
|
||||||
""" Returns the list of volume types """
|
"""Returns the list of volume types."""
|
||||||
context = req.environ['cinder.context']
|
context = req.environ['cinder.context']
|
||||||
vol_types = volume_types.get_all_types(context).values()
|
vol_types = volume_types.get_all_types(context).values()
|
||||||
return self._view_builder.index(req, vol_types)
|
return self._view_builder.index(req, vol_types)
|
||||||
|
|
||||||
@wsgi.serializers(xml=VolumeTypeTemplate)
|
@wsgi.serializers(xml=VolumeTypeTemplate)
|
||||||
def show(self, req, id):
|
def show(self, req, id):
|
||||||
""" Return a single volume type item """
|
"""Return a single volume type item."""
|
||||||
context = req.environ['cinder.context']
|
context = req.environ['cinder.context']
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
@ -291,7 +291,7 @@ class VolumeController(wsgi.Controller):
|
|||||||
if req_volume_type:
|
if req_volume_type:
|
||||||
try:
|
try:
|
||||||
kwargs['volume_type'] = volume_types.get_volume_type_by_name(
|
kwargs['volume_type'] = volume_types.get_volume_type_by_name(
|
||||||
context, req_volume_type)
|
context, req_volume_type)
|
||||||
except exception.VolumeTypeNotFound:
|
except exception.VolumeTypeNotFound:
|
||||||
explanation = 'Volume type not found.'
|
explanation = 'Volume type not found.'
|
||||||
raise exc.HTTPNotFound(explanation=explanation)
|
raise exc.HTTPNotFound(explanation=explanation)
|
||||||
@ -389,7 +389,7 @@ def remove_invalid_options(context, search_options, allowed_search_options):
|
|||||||
return
|
return
|
||||||
# Otherwise, strip out all unknown options
|
# Otherwise, strip out all unknown options
|
||||||
unknown_options = [opt for opt in search_options
|
unknown_options = [opt for opt in search_options
|
||||||
if opt not in allowed_search_options]
|
if opt not in allowed_search_options]
|
||||||
bad_options = ", ".join(unknown_options)
|
bad_options = ", ".join(unknown_options)
|
||||||
log_msg = _("Removing options '%(bad_options)s' from query") % locals()
|
log_msg = _("Removing options '%(bad_options)s' from query") % locals()
|
||||||
LOG.debug(log_msg)
|
LOG.debug(log_msg)
|
||||||
|
@ -23,8 +23,8 @@ class ViewBuilder(common.ViewBuilder):
|
|||||||
def show(self, request, volume_type, brief=False):
|
def show(self, request, volume_type, brief=False):
|
||||||
"""Trim away extraneous volume type attributes."""
|
"""Trim away extraneous volume type attributes."""
|
||||||
trimmed = dict(id=volume_type.get('id'),
|
trimmed = dict(id=volume_type.get('id'),
|
||||||
name=volume_type.get('name'),
|
name=volume_type.get('name'),
|
||||||
extra_specs=volume_type.get('extra_specs'))
|
extra_specs=volume_type.get('extra_specs'))
|
||||||
return trimmed if brief else dict(volume_type=trimmed)
|
return trimmed if brief else dict(volume_type=trimmed)
|
||||||
|
|
||||||
def index(self, request, volume_types):
|
def index(self, request, volume_types):
|
||||||
|
@ -38,14 +38,9 @@ class ViewBuilder(object):
|
|||||||
version_objs.append({
|
version_objs.append({
|
||||||
"id": version['id'],
|
"id": version['id'],
|
||||||
"status": version['status'],
|
"status": version['status'],
|
||||||
"links": [
|
"links": [{"rel": "self",
|
||||||
{
|
"href": self.generate_href(req.path), }, ],
|
||||||
"rel": "self",
|
"media-types": version['media-types'], })
|
||||||
"href": self.generate_href(req.path),
|
|
||||||
},
|
|
||||||
],
|
|
||||||
"media-types": version['media-types'],
|
|
||||||
})
|
|
||||||
|
|
||||||
return dict(choices=version_objs)
|
return dict(choices=version_objs)
|
||||||
|
|
||||||
@ -57,8 +52,7 @@ class ViewBuilder(object):
|
|||||||
"id": version['id'],
|
"id": version['id'],
|
||||||
"status": version['status'],
|
"status": version['status'],
|
||||||
"updated": version['updated'],
|
"updated": version['updated'],
|
||||||
"links": self._build_links(version),
|
"links": self._build_links(version), })
|
||||||
})
|
|
||||||
|
|
||||||
return dict(versions=version_objs)
|
return dict(versions=version_objs)
|
||||||
|
|
||||||
@ -66,20 +60,15 @@ class ViewBuilder(object):
|
|||||||
reval = copy.deepcopy(version)
|
reval = copy.deepcopy(version)
|
||||||
reval['links'].insert(0, {
|
reval['links'].insert(0, {
|
||||||
"rel": "self",
|
"rel": "self",
|
||||||
"href": self.base_url.rstrip('/') + '/',
|
"href": self.base_url.rstrip('/') + '/', })
|
||||||
})
|
|
||||||
return dict(version=reval)
|
return dict(version=reval)
|
||||||
|
|
||||||
def _build_links(self, version_data):
|
def _build_links(self, version_data):
|
||||||
"""Generate a container of links that refer to the provided version."""
|
"""Generate a container of links that refer to the provided version."""
|
||||||
href = self.generate_href()
|
href = self.generate_href()
|
||||||
|
|
||||||
links = [
|
links = [{'rel': 'self',
|
||||||
{
|
'href': href, }, ]
|
||||||
"rel": "self",
|
|
||||||
"href": href,
|
|
||||||
},
|
|
||||||
]
|
|
||||||
|
|
||||||
return links
|
return links
|
||||||
|
|
||||||
|
@@ -26,8 +26,7 @@ LOG = logging.getLogger(__name__)
 deprecate_opts = [
 cfg.BoolOpt('fatal_deprecations',
 default=False,
-help='make deprecations fatal')
-]
+help='make deprecations fatal')]
 FLAGS = flags.FLAGS
 FLAGS.register_opts(deprecate_opts)

|
@ -59,7 +59,7 @@ class RequestContext(object):
|
|||||||
"""
|
"""
|
||||||
if kwargs:
|
if kwargs:
|
||||||
LOG.warn(_('Arguments dropped when creating context: %s') %
|
LOG.warn(_('Arguments dropped when creating context: %s') %
|
||||||
str(kwargs))
|
str(kwargs))
|
||||||
|
|
||||||
self.user_id = user_id
|
self.user_id = user_id
|
||||||
self.project_id = project_id
|
self.project_id = project_id
|
||||||
|
@ -61,8 +61,7 @@ db_opts = [
|
|||||||
help='Template string to be used to generate volume names'),
|
help='Template string to be used to generate volume names'),
|
||||||
cfg.StrOpt('snapshot_name_template',
|
cfg.StrOpt('snapshot_name_template',
|
||||||
default='snapshot-%s',
|
default='snapshot-%s',
|
||||||
help='Template string to be used to generate snapshot names'),
|
help='Template string to be used to generate snapshot names'), ]
|
||||||
]
|
|
||||||
|
|
||||||
FLAGS = flags.FLAGS
|
FLAGS = flags.FLAGS
|
||||||
FLAGS.register_opts(db_opts)
|
FLAGS.register_opts(db_opts)
|
||||||
@ -155,8 +154,9 @@ def migration_get(context, migration_id):
|
|||||||
|
|
||||||
def migration_get_by_instance_and_status(context, instance_uuid, status):
|
def migration_get_by_instance_and_status(context, instance_uuid, status):
|
||||||
"""Finds a migration by the instance uuid its migrating."""
|
"""Finds a migration by the instance uuid its migrating."""
|
||||||
return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
|
return IMPL.migration_get_by_instance_and_status(context,
|
||||||
status)
|
instance_uuid,
|
||||||
|
status)
|
||||||
|
|
||||||
|
|
||||||
def migration_get_all_unconfirmed(context, confirm_window):
|
def migration_get_all_unconfirmed(context, confirm_window):
|
||||||
@ -378,12 +378,14 @@ def volume_type_extra_specs_delete(context, volume_type_id, key):
|
|||||||
IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
|
IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
|
||||||
|
|
||||||
|
|
||||||
def volume_type_extra_specs_update_or_create(context, volume_type_id,
|
def volume_type_extra_specs_update_or_create(context,
|
||||||
extra_specs):
|
volume_type_id,
|
||||||
|
extra_specs):
|
||||||
"""Create or update volume type extra specs. This adds or modifies the
|
"""Create or update volume type extra specs. This adds or modifies the
|
||||||
key/value pairs specified in the extra specs dict argument"""
|
key/value pairs specified in the extra specs dict argument"""
|
||||||
IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
|
IMPL.volume_type_extra_specs_update_or_create(context,
|
||||||
extra_specs)
|
volume_type_id,
|
||||||
|
extra_specs)
|
||||||
|
|
||||||
|
|
||||||
###################
|
###################
|
||||||
@ -391,8 +393,10 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id,
|
|||||||
|
|
||||||
def volume_glance_metadata_create(context, volume_id, key, value):
|
def volume_glance_metadata_create(context, volume_id, key, value):
|
||||||
"""Update the Glance metadata for the specified volume."""
|
"""Update the Glance metadata for the specified volume."""
|
||||||
return IMPL.volume_glance_metadata_create(context, volume_id,
|
return IMPL.volume_glance_metadata_create(context,
|
||||||
key, value)
|
volume_id,
|
||||||
|
key,
|
||||||
|
value)
|
||||||
|
|
||||||
|
|
||||||
def volume_glance_metadata_get(context, volume_id):
|
def volume_glance_metadata_get(context, volume_id):
|
||||||
|
@ -20,17 +20,12 @@
|
|||||||
"""Implementation of SQLAlchemy backend."""
|
"""Implementation of SQLAlchemy backend."""
|
||||||
|
|
||||||
import datetime
|
import datetime
|
||||||
import functools
|
|
||||||
import uuid
|
import uuid
|
||||||
import warnings
|
import warnings
|
||||||
|
|
||||||
from sqlalchemy.exc import IntegrityError
|
from sqlalchemy.exc import IntegrityError
|
||||||
from sqlalchemy import or_
|
from sqlalchemy import or_
|
||||||
from sqlalchemy.orm import joinedload
|
from sqlalchemy.orm import joinedload
|
||||||
from sqlalchemy.orm import joinedload_all
|
|
||||||
from sqlalchemy.sql.expression import asc
|
|
||||||
from sqlalchemy.sql.expression import desc
|
|
||||||
from sqlalchemy.sql.expression import literal_column
|
|
||||||
from sqlalchemy.sql.expression import literal_column
|
from sqlalchemy.sql.expression import literal_column
|
||||||
from sqlalchemy.sql import func
|
from sqlalchemy.sql import func
|
||||||
|
|
||||||
@ -179,7 +174,7 @@ def model_query(context, *args, **kwargs):
|
|||||||
query = query.filter_by(deleted=True)
|
query = query.filter_by(deleted=True)
|
||||||
else:
|
else:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
_("Unrecognized read_deleted value '%s'") % read_deleted)
|
_("Unrecognized read_deleted value '%s'") % read_deleted)
|
||||||
|
|
||||||
if project_only and is_user_context(context):
|
if project_only and is_user_context(context):
|
||||||
query = query.filter_by(project_id=context.project_id)
|
query = query.filter_by(project_id=context.project_id)
|
||||||
@ -242,9 +237,12 @@ def service_destroy(context, service_id):
|
|||||||
|
|
||||||
@require_admin_context
|
@require_admin_context
|
||||||
def service_get(context, service_id, session=None):
|
def service_get(context, service_id, session=None):
|
||||||
result = model_query(context, models.Service, session=session).\
|
result = model_query(
|
||||||
filter_by(id=service_id).\
|
context,
|
||||||
first()
|
models.Service,
|
||||||
|
session=session).\
|
||||||
|
filter_by(id=service_id).\
|
||||||
|
first()
|
||||||
if not result:
|
if not result:
|
||||||
raise exception.ServiceNotFound(service_id=service_id)
|
raise exception.ServiceNotFound(service_id=service_id)
|
||||||
|
|
||||||
@ -263,19 +261,21 @@ def service_get_all(context, disabled=None):
|
|||||||
|
|
||||||
@require_admin_context
|
@require_admin_context
|
||||||
def service_get_all_by_topic(context, topic):
|
def service_get_all_by_topic(context, topic):
|
||||||
return model_query(context, models.Service, read_deleted="no").\
|
return model_query(
|
||||||
filter_by(disabled=False).\
|
context, models.Service, read_deleted="no").\
|
||||||
filter_by(topic=topic).\
|
filter_by(disabled=False).\
|
||||||
all()
|
filter_by(topic=topic).\
|
||||||
|
all()
|
||||||
|
|
||||||
|
|
||||||
@require_admin_context
|
@require_admin_context
|
||||||
def service_get_by_host_and_topic(context, host, topic):
|
def service_get_by_host_and_topic(context, host, topic):
|
||||||
result = model_query(context, models.Service, read_deleted="no").\
|
result = model_query(
|
||||||
filter_by(disabled=False).\
|
context, models.Service, read_deleted="no").\
|
||||||
filter_by(host=host).\
|
filter_by(disabled=False).\
|
||||||
filter_by(topic=topic).\
|
filter_by(host=host).\
|
||||||
first()
|
filter_by(topic=topic).\
|
||||||
|
first()
|
||||||
if not result:
|
if not result:
|
||||||
raise exception.ServiceNotFound(host=host, topic=topic)
|
raise exception.ServiceNotFound(host=host, topic=topic)
|
||||||
return result
|
return result
|
||||||
@ -283,9 +283,10 @@ def service_get_by_host_and_topic(context, host, topic):
|
|||||||
|
|
||||||
@require_admin_context
|
@require_admin_context
|
||||||
def service_get_all_by_host(context, host):
|
def service_get_all_by_host(context, host):
|
||||||
return model_query(context, models.Service, read_deleted="no").\
|
return model_query(
|
||||||
filter_by(host=host).\
|
context, models.Service, read_deleted="no").\
|
||||||
all()
|
filter_by(host=host).\
|
||||||
|
all()
|
||||||
|
|
||||||
|
|
||||||
@require_admin_context
|
@require_admin_context
|
||||||
@ -294,11 +295,11 @@ def _service_get_all_topic_subquery(context, session, topic, subq, label):
|
|||||||
return model_query(context, models.Service,
|
return model_query(context, models.Service,
|
||||||
func.coalesce(sort_value, 0),
|
func.coalesce(sort_value, 0),
|
||||||
session=session, read_deleted="no").\
|
session=session, read_deleted="no").\
|
||||||
filter_by(topic=topic).\
|
filter_by(topic=topic).\
|
||||||
filter_by(disabled=False).\
|
filter_by(disabled=False).\
|
||||||
outerjoin((subq, models.Service.host == subq.c.host)).\
|
outerjoin((subq, models.Service.host == subq.c.host)).\
|
||||||
order_by(sort_value).\
|
order_by(sort_value).\
|
||||||
all()
|
all()
|
||||||
|
|
||||||
|
|
||||||
@require_admin_context
|
@require_admin_context
|
||||||
@ -310,8 +311,8 @@ def service_get_all_volume_sorted(context):
|
|||||||
subq = model_query(context, models.Volume.host,
|
subq = model_query(context, models.Volume.host,
|
||||||
func.sum(models.Volume.size).label(label),
|
func.sum(models.Volume.size).label(label),
|
||||||
session=session, read_deleted="no").\
|
session=session, read_deleted="no").\
|
||||||
group_by(models.Volume.host).\
|
group_by(models.Volume.host).\
|
||||||
subquery()
|
subquery()
|
||||||
return _service_get_all_topic_subquery(context,
|
return _service_get_all_topic_subquery(context,
|
||||||
session,
|
session,
|
||||||
topic,
|
topic,
|
||||||
@ -322,9 +323,9 @@ def service_get_all_volume_sorted(context):
|
|||||||
@require_admin_context
|
@require_admin_context
|
||||||
def service_get_by_args(context, host, binary):
|
def service_get_by_args(context, host, binary):
|
||||||
result = model_query(context, models.Service).\
|
result = model_query(context, models.Service).\
|
||||||
filter_by(host=host).\
|
filter_by(host=host).\
|
||||||
filter_by(binary=binary).\
|
filter_by(binary=binary).\
|
||||||
first()
|
first()
|
||||||
|
|
||||||
if not result:
|
if not result:
|
||||||
raise exception.HostBinaryNotFound(host=host, binary=binary)
|
raise exception.HostBinaryNotFound(host=host, binary=binary)
|
||||||
@ -390,8 +391,8 @@ def _dict_with_extra_specs(inst_type_query):
|
|||||||
@require_admin_context
|
@require_admin_context
|
||||||
def iscsi_target_count_by_host(context, host):
|
def iscsi_target_count_by_host(context, host):
|
||||||
return model_query(context, models.IscsiTarget).\
|
return model_query(context, models.IscsiTarget).\
|
||||||
filter_by(host=host).\
|
filter_by(host=host).\
|
||||||
count()
|
count()
|
||||||
|
|
||||||
|
|
||||||
@require_admin_context
|
@require_admin_context
|
||||||
@ -414,9 +415,9 @@ def iscsi_target_create_safe(context, values):
|
|||||||
def quota_get(context, project_id, resource, session=None):
|
def quota_get(context, project_id, resource, session=None):
|
||||||
result = model_query(context, models.Quota, session=session,
|
result = model_query(context, models.Quota, session=session,
|
||||||
read_deleted="no").\
|
read_deleted="no").\
|
||||||
filter_by(project_id=project_id).\
|
filter_by(project_id=project_id).\
|
||||||
filter_by(resource=resource).\
|
filter_by(resource=resource).\
|
||||||
first()
|
first()
|
||||||
|
|
||||||
if not result:
|
if not result:
|
||||||
raise exception.ProjectQuotaNotFound(project_id=project_id)
|
raise exception.ProjectQuotaNotFound(project_id=project_id)
|
||||||
@ -429,8 +430,8 @@ def quota_get_all_by_project(context, project_id):
|
|||||||
authorize_project_context(context, project_id)
|
authorize_project_context(context, project_id)
|
||||||
|
|
||||||
rows = model_query(context, models.Quota, read_deleted="no").\
|
rows = model_query(context, models.Quota, read_deleted="no").\
|
||||||
filter_by(project_id=project_id).\
|
filter_by(project_id=project_id).\
|
||||||
all()
|
all()
|
||||||
|
|
||||||
result = {'project_id': project_id}
|
result = {'project_id': project_id}
|
||||||
for row in rows:
|
for row in rows:
|
||||||
@ -473,9 +474,9 @@ def quota_destroy(context, project_id, resource):
|
|||||||
def quota_class_get(context, class_name, resource, session=None):
|
def quota_class_get(context, class_name, resource, session=None):
|
||||||
result = model_query(context, models.QuotaClass, session=session,
|
result = model_query(context, models.QuotaClass, session=session,
|
||||||
read_deleted="no").\
|
read_deleted="no").\
|
||||||
filter_by(class_name=class_name).\
|
filter_by(class_name=class_name).\
|
||||||
filter_by(resource=resource).\
|
filter_by(resource=resource).\
|
||||||
first()
|
first()
|
||||||
|
|
||||||
if not result:
|
if not result:
|
||||||
raise exception.QuotaClassNotFound(class_name=class_name)
|
raise exception.QuotaClassNotFound(class_name=class_name)
|
||||||
@ -488,8 +489,8 @@ def quota_class_get_all_by_name(context, class_name):
|
|||||||
authorize_quota_class_context(context, class_name)
|
authorize_quota_class_context(context, class_name)
|
||||||
|
|
||||||
rows = model_query(context, models.QuotaClass, read_deleted="no").\
|
rows = model_query(context, models.QuotaClass, read_deleted="no").\
|
||||||
filter_by(class_name=class_name).\
|
filter_by(class_name=class_name).\
|
||||||
all()
|
all()
|
||||||
|
|
||||||
result = {'class_name': class_name}
|
result = {'class_name': class_name}
|
||||||
for row in rows:
|
for row in rows:
|
||||||
@ -533,8 +534,8 @@ def quota_class_destroy_all_by_name(context, class_name):
|
|||||||
with session.begin():
|
with session.begin():
|
||||||
quota_classes = model_query(context, models.QuotaClass,
|
quota_classes = model_query(context, models.QuotaClass,
|
||||||
session=session, read_deleted="no").\
|
session=session, read_deleted="no").\
|
||||||
filter_by(class_name=class_name).\
|
filter_by(class_name=class_name).\
|
||||||
all()
|
all()
|
||||||
|
|
||||||
for quota_class_ref in quota_classes:
|
for quota_class_ref in quota_classes:
|
||||||
quota_class_ref.delete(session=session)
|
quota_class_ref.delete(session=session)
|
||||||
@ -547,9 +548,9 @@ def quota_class_destroy_all_by_name(context, class_name):
|
|||||||
def quota_usage_get(context, project_id, resource, session=None):
|
def quota_usage_get(context, project_id, resource, session=None):
|
||||||
result = model_query(context, models.QuotaUsage, session=session,
|
result = model_query(context, models.QuotaUsage, session=session,
|
||||||
read_deleted="no").\
|
read_deleted="no").\
|
||||||
filter_by(project_id=project_id).\
|
filter_by(project_id=project_id).\
|
||||||
filter_by(resource=resource).\
|
filter_by(resource=resource).\
|
||||||
first()
|
first()
|
||||||
|
|
||||||
if not result:
|
if not result:
|
||||||
raise exception.QuotaUsageNotFound(project_id=project_id)
|
raise exception.QuotaUsageNotFound(project_id=project_id)
|
||||||
@ -562,8 +563,8 @@ def quota_usage_get_all_by_project(context, project_id):
|
|||||||
authorize_project_context(context, project_id)
|
authorize_project_context(context, project_id)
|
||||||
|
|
||||||
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
|
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
|
||||||
filter_by(project_id=project_id).\
|
filter_by(project_id=project_id).\
|
||||||
all()
|
all()
|
||||||
|
|
||||||
result = {'project_id': project_id}
|
result = {'project_id': project_id}
|
||||||
for row in rows:
|
for row in rows:
|
||||||
@ -593,8 +594,7 @@ def quota_usage_create(context, project_id, resource, in_use, reserved,
|
|||||||
def reservation_get(context, uuid, session=None):
|
def reservation_get(context, uuid, session=None):
|
||||||
result = model_query(context, models.Reservation, session=session,
|
result = model_query(context, models.Reservation, session=session,
|
||||||
read_deleted="no").\
|
read_deleted="no").\
|
||||||
filter_by(uuid=uuid).\
|
filter_by(uuid=uuid).first()
|
||||||
first()
|
|
||||||
|
|
||||||
if not result:
|
if not result:
|
||||||
raise exception.ReservationNotFound(uuid=uuid)
|
raise exception.ReservationNotFound(uuid=uuid)
|
||||||
@ -607,8 +607,7 @@ def reservation_get_all_by_project(context, project_id):
|
|||||||
authorize_project_context(context, project_id)
|
authorize_project_context(context, project_id)
|
||||||
|
|
||||||
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
|
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
|
||||||
filter_by(project_id=project_id).\
|
filter_by(project_id=project_id).all()
|
||||||
all()
|
|
||||||
|
|
||||||
result = {'project_id': project_id}
|
result = {'project_id': project_id}
|
||||||
for row in rows:
|
for row in rows:
|
||||||
@ -653,9 +652,9 @@ def _get_quota_usages(context, session):
    rows = model_query(context, models.QuotaUsage,
                       read_deleted="no",
                       session=session).\
        filter_by(project_id=context.project_id).\
        with_lockmode('update').\
        all()
    return dict((row.resource, row) for row in rows)


@ -798,9 +797,9 @@ def _quota_reservations(session, context, reservations):
    return model_query(context, models.Reservation,
                       read_deleted="no",
                       session=session).\
        filter(models.Reservation.uuid.in_(reservations)).\
        with_lockmode('update').\
        all()


@require_context
@ -844,24 +843,24 @@ def quota_destroy_all_by_project(context, project_id):
    with session.begin():
        quotas = model_query(context, models.Quota, session=session,
                             read_deleted="no").\
            filter_by(project_id=project_id).\
            all()

        for quota_ref in quotas:
            quota_ref.delete(session=session)

        quota_usages = model_query(context, models.QuotaUsage,
                                   session=session, read_deleted="no").\
            filter_by(project_id=project_id).\
            all()

        for quota_usage_ref in quota_usages:
            quota_usage_ref.delete(session=session)

        reservations = model_query(context, models.Reservation,
                                   session=session, read_deleted="no").\
            filter_by(project_id=project_id).\
            all()

        for reservation_ref in reservations:
            reservation_ref.delete(session=session)
@ -874,8 +873,8 @@ def reservation_expire(context):
    current_time = timeutils.utcnow()
    results = model_query(context, models.Reservation, session=session,
                          read_deleted="no").\
        filter(models.Reservation.expire < current_time).\
        all()

    if results:
        for reservation in results:
@ -895,10 +894,10 @@ def volume_allocate_iscsi_target(context, volume_id, host):
    with session.begin():
        iscsi_target_ref = model_query(context, models.IscsiTarget,
                                       session=session, read_deleted="no").\
            filter_by(volume=None).\
            filter_by(host=host).\
            with_lockmode('update').\
            first()

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
@ -949,8 +948,8 @@ def volume_data_get_for_host(context, host, session=None):
                         func.sum(models.Volume.size),
                         read_deleted="no",
                         session=session).\
        filter_by(host=host).\
        first()

    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0)
@ -963,8 +962,8 @@ def volume_data_get_for_project(context, project_id, session=None):
                         func.sum(models.Volume.size),
                         read_deleted="no",
                         session=session).\
        filter_by(project_id=project_id).\
        first()

    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0)
@ -975,19 +974,19 @@ def volume_destroy(context, volume_id):
    session = get_session()
    with session.begin():
        session.query(models.Volume).\
            filter_by(id=volume_id).\
            update({'status': 'deleted',
                    'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
        session.query(models.IscsiTarget).\
            filter_by(volume_id=volume_id).\
            update({'volume_id': None})
        session.query(models.VolumeMetadata).\
            filter_by(volume_id=volume_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_admin_context
@ -1006,15 +1005,15 @@ def volume_detached(context, volume_id):
def _volume_get_query(context, session=None, project_only=False):
    return model_query(context, models.Volume, session=session,
                       project_only=project_only).\
        options(joinedload('volume_metadata')).\
        options(joinedload('volume_type'))


@require_context
def volume_get(context, volume_id, session=None):
    result = _volume_get_query(context, session=session, project_only=True).\
        filter_by(id=volume_id).\
        first()

    if not result:
        raise exception.VolumeNotFound(volume_id=volume_id)
@ -1035,10 +1034,10 @@ def volume_get_all_by_host(context, host):
@require_admin_context
def volume_get_all_by_instance_uuid(context, instance_uuid):
    result = model_query(context, models.Volume, read_deleted="no").\
        options(joinedload('volume_metadata')).\
        options(joinedload('volume_type')).\
        filter_by(instance_uuid=instance_uuid).\
        all()

    if not result:
        return []
@ -1055,8 +1054,8 @@ def volume_get_all_by_project(context, project_id):
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
    result = model_query(context, models.IscsiTarget, read_deleted="yes").\
        filter_by(volume_id=volume_id).\
        first()

    if not result:
        raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)
@ -1085,7 +1084,7 @@ def volume_update(context, volume_id, values):
def _volume_metadata_get_query(context, volume_id, session=None):
    return model_query(context, models.VolumeMetadata,
                       session=session, read_deleted="no").\
        filter_by(volume_id=volume_id)


@require_context
@ -1113,8 +1112,8 @@ def volume_metadata_delete(context, volume_id, key):
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key, session=None):
    result = _volume_metadata_get_query(context, volume_id, session=session).\
        filter_by(key=key).\
        first()

    if not result:
        raise exception.VolumeMetadataNotFound(metadata_key=key,
@ -1179,19 +1178,19 @@ def snapshot_destroy(context, snapshot_id):
    session = get_session()
    with session.begin():
        session.query(models.Snapshot).\
            filter_by(id=snapshot_id).\
            update({'status': 'deleted',
                    'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_context
def snapshot_get(context, snapshot_id, session=None):
    result = model_query(context, models.Snapshot, session=session,
                         project_only=True).\
        filter_by(id=snapshot_id).\
        first()

    if not result:
        raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
@ -1208,15 +1207,15 @@ def snapshot_get_all(context):
def snapshot_get_all_for_volume(context, volume_id):
    return model_query(context, models.Snapshot, read_deleted='no',
                       project_only=True).\
        filter_by(volume_id=volume_id).all()


@require_context
def snapshot_get_all_by_project(context, project_id):
    authorize_project_context(context, project_id)
    return model_query(context, models.Snapshot).\
        filter_by(project_id=project_id).\
        all()


@require_context
@ -1227,8 +1226,8 @@ def snapshot_data_get_for_project(context, project_id, session=None):
                         func.sum(models.Snapshot.volume_size),
                         read_deleted="no",
                         session=session).\
        filter_by(project_id=project_id).\
        first()

    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0)
@ -1268,8 +1267,8 @@ def migration_update(context, id, values):
def migration_get(context, id, session=None):
    result = model_query(context, models.Migration, session=session,
                         read_deleted="yes").\
        filter_by(id=id).\
        first()

    if not result:
        raise exception.MigrationNotFound(migration_id=id)
@ -1280,9 +1279,9 @@ def migration_get(context, id, session=None):
@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
    result = model_query(context, models.Migration, read_deleted="yes").\
        filter_by(instance_uuid=instance_uuid).\
        filter_by(status=status).\
        first()

    if not result:
        raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
@ -1294,13 +1293,13 @@ def migration_get_by_instance_and_status(context, instance_uuid, status):
@require_admin_context
def migration_get_all_unconfirmed(context, confirm_window, session=None):
    confirm_window = timeutils.utcnow() - datetime.timedelta(
        seconds=confirm_window)

    return model_query(context, models.Migration, session=session,
                       read_deleted="yes").\
        filter(models.Migration.updated_at <= confirm_window).\
        filter_by(status="finished").\
        all()


##################
@ -1342,9 +1341,9 @@ def volume_type_get_all(context, inactive=False, filters=None):
    read_deleted = "yes" if inactive else "no"
    rows = model_query(context, models.VolumeTypes,
                       read_deleted=read_deleted).\
        options(joinedload('extra_specs')).\
        order_by("name").\
        all()

    # TODO(sirp): this patern of converting rows to a result with extra_specs
    # is repeated quite a bit, might be worth creating a method for it
@ -1359,9 +1358,9 @@ def volume_type_get_all(context, inactive=False, filters=None):
def volume_type_get(context, id, session=None):
    """Returns a dict describing specific volume_type"""
    result = model_query(context, models.VolumeTypes, session=session).\
        options(joinedload('extra_specs')).\
        filter_by(id=id).\
        first()

    if not result:
        raise exception.VolumeTypeNotFound(volume_type_id=id)
@ -1373,9 +1372,9 @@ def volume_type_get(context, id, session=None):
def volume_type_get_by_name(context, name, session=None):
    """Returns a dict describing specific volume_type"""
    result = model_query(context, models.VolumeTypes, session=session).\
        options(joinedload('extra_specs')).\
        filter_by(name=name).\
        first()

    if not result:
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
@ -1391,25 +1390,27 @@ def volume_type_destroy(context, name):
                                            session=session)
        volume_type_id = volume_type_ref['id']
        session.query(models.VolumeTypes).\
            filter_by(id=volume_type_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})
        session.query(models.VolumeTypeExtraSpecs).\
            filter_by(volume_type_id=volume_type_id).\
            update({'deleted': True,
                    'deleted_at': timeutils.utcnow(),
                    'updated_at': literal_column('updated_at')})


@require_context
-def volume_get_active_by_window(context, begin, end=None,
-                                project_id=None):
+def volume_get_active_by_window(context,
+                                begin,
+                                end=None,
+                                project_id=None):
    """Return volumes that were active during window."""
    session = get_session()
    query = session.query(models.Volume)

-    query = query.filter(or_(models.Volume.deleted_at == None,
+    query = query.filter(or_(models.Volume.deleted_at is None,
                             models.Volume.deleted_at > begin))
    if end:
        query = query.filter(models.Volume.created_at < end)
@ -1425,13 +1426,13 @@ def volume_get_active_by_window(context, begin, end=None,
def _volume_type_extra_specs_query(context, volume_type_id, session=None):
    return model_query(context, models.VolumeTypeExtraSpecs, session=session,
                       read_deleted="no").\
        filter_by(volume_type_id=volume_type_id)


@require_context
def volume_type_extra_specs_get(context, volume_type_id):
    rows = _volume_type_extra_specs_query(context, volume_type_id).\
        all()

    result = {}
    for row in rows:
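One change in the volume_get_active_by_window hunk above is not whitespace: the deleted_at == None comparison becomes deleted_at is None. With SQLAlchemy column objects the two are not equivalent, which is presumably why E711/E712 were put on the ignore list in the first place. A small stand-alone sketch of the difference, using an invented table rather than the Cinder models:

# Illustrative sketch, not Cinder code.
from sqlalchemy import Column, DateTime, Integer, MetaData, Table
from sqlalchemy.sql import or_

metadata = MetaData()
volumes = Table('volumes', metadata,
                Column('id', Integer, primary_key=True),
                Column('deleted_at', DateTime))

# `== None` is overloaded by SQLAlchemy and renders as an IS NULL clause.
clause = or_(volumes.c.deleted_at == None,  # noqa: E711
             volumes.c.deleted_at > '2012-01-01')
print(clause)  # roughly: volumes.deleted_at IS NULL OR volumes.deleted_at > :deleted_at_1

# `is None` is evaluated by Python before SQLAlchemy sees it and is simply False,
# so NULL rows would silently fall out of the OR.
print(volumes.c.deleted_at is None)  # False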
@ -1453,13 +1454,14 @@ def volume_type_extra_specs_delete(context, volume_type_id, key):
def volume_type_extra_specs_get_item(context, volume_type_id, key,
                                     session=None):
    result = _volume_type_extra_specs_query(
        context, volume_type_id, session=session).\
        filter_by(key=key).\
        first()

    if not result:
        raise exception.VolumeTypeExtraSpecsNotFound(
-            extra_specs_key=key, volume_type_id=volume_type_id)
+            extra_specs_key=key,
+            volume_type_id=volume_type_id)

    return result

@ -1493,8 +1495,8 @@ def volume_glance_metadata_get(context, volume_id, session=None):
        session = get_session()

    return session.query(models.VolumeGlanceMetadata).\
        filter_by(volume_id=volume_id).\
        filter_by(deleted=False).all()


@require_context
@ -1505,8 +1507,8 @@ def volume_snapshot_glance_metadata_get(context, snapshot_id, session=None):
        session = get_session()

    return session.query(models.VolumeGlanceMetadata).\
        filter_by(snapshot_id=snapshot_id).\
        filter_by(deleted=False).all()


@require_context
@ -1523,9 +1525,9 @@ def volume_glance_metadata_create(context, volume_id, key, value,

    with session.begin():
        rows = session.query(models.VolumeGlanceMetadata).\
            filter_by(volume_id=volume_id).\
            filter_by(key=key).\
            filter_by(deleted=False).all()

        if len(rows) > 0:
            raise exception.GlanceMetadataExists(key=key,
@ -1577,7 +1579,7 @@ def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id,
        session = get_session()

    metadata = volume_snapshot_glance_metadata_get(context, snapshot_id,
                                                   session=session)
    with session.begin():
        for meta in metadata:
            vol_glance_metadata = models.VolumeGlanceMetadata()
@ -1628,8 +1630,8 @@ def sm_backend_conf_update(context, sm_backend_id, values):
        backend_conf = model_query(context, models.SMBackendConf,
                                   session=session,
                                   read_deleted="yes").\
            filter_by(id=sm_backend_id).\
            first()

        if not backend_conf:
            raise exception.NotFound(
@ -1648,15 +1650,15 @@ def sm_backend_conf_delete(context, sm_backend_id):
    with session.begin():
        model_query(context, models.SMBackendConf, session=session,
                    read_deleted="yes").\
            filter_by(id=sm_backend_id).\
            delete()


@require_admin_context
def sm_backend_conf_get(context, sm_backend_id):
    result = model_query(context, models.SMBackendConf, read_deleted="yes").\
        filter_by(id=sm_backend_id).\
        first()

    if not result:
        raise exception.NotFound(_("No backend config with id "
@ -1668,14 +1670,14 @@ def sm_backend_conf_get(context, sm_backend_id):
@require_admin_context
def sm_backend_conf_get_by_sr(context, sr_uuid):
    return model_query(context, models.SMBackendConf, read_deleted="yes").\
        filter_by(sr_uuid=sr_uuid).\
        first()


@require_admin_context
def sm_backend_conf_get_all(context):
    return model_query(context, models.SMBackendConf, read_deleted="yes").\
        all()


####################
@ -1684,7 +1686,7 @@ def sm_backend_conf_get_all(context):
def _sm_flavor_get_query(context, sm_flavor_label, session=None):
    return model_query(context, models.SMFlavors, session=session,
                       read_deleted="yes").\
        filter_by(label=sm_flavor_label)


@require_admin_context
@ -1716,7 +1718,7 @@ def sm_flavor_get(context, sm_flavor_label):

    if not result:
        raise exception.NotFound(
            _("No sm_flavor called %(sm_flavor)s") % locals())

    return result

@ -1732,7 +1734,7 @@ def sm_flavor_get_all(context):
def _sm_volume_get_query(context, volume_id, session=None):
    return model_query(context, models.SMVolume, session=session,
                       read_deleted="yes").\
        filter_by(id=volume_id)


def sm_volume_create(context, values):
@ -28,23 +28,29 @@ def upgrade(migrate_engine):

    # New table
    quota_classes = Table('quota_classes', meta,
                          Column('created_at', DateTime(timezone=False)),
                          Column('updated_at', DateTime(timezone=False)),
                          Column('deleted_at', DateTime(timezone=False)),
-                         Column('deleted', Boolean(create_constraint=True, name=None)),
-                         Column('id', Integer(), primary_key=True),
-                         Column('class_name',
-                                String(length=255, convert_unicode=True,
-                                       assert_unicode=None, unicode_error=None,
-                                       _warn_on_bytestring=False), index=True),
-                         Column('resource',
-                                String(length=255, convert_unicode=True,
-                                       assert_unicode=None, unicode_error=None,
-                                       _warn_on_bytestring=False)),
-                         Column('hard_limit', Integer(), nullable=True),
-                         mysql_engine='InnoDB',
-                         mysql_charset='utf8',
-                         )
+                         Column('deleted', Boolean(create_constraint=True,
+                                                   name=None)),
+                         Column('id', Integer(), primary_key=True),
+                         Column('class_name',
+                                String(length=255,
+                                       convert_unicode=True,
+                                       assert_unicode=None,
+                                       unicode_error=None,
+                                       _warn_on_bytestring=False),
+                                index=True),
+                         Column('resource',
+                                String(length=255,
+                                       convert_unicode=True,
+                                       assert_unicode=None,
+                                       unicode_error=None,
+                                       _warn_on_bytestring=False)),
+                         Column('hard_limit', Integer(), nullable=True),
+                         mysql_engine='InnoDB',
+                         mysql_charset='utf8',
+                         )

    try:
        quota_classes.create()
@ -53,26 +59,27 @@ def upgrade(migrate_engine):
        raise

    quota_usages = Table('quota_usages', meta,
                         Column('created_at', DateTime(timezone=False)),
                         Column('updated_at', DateTime(timezone=False)),
                         Column('deleted_at', DateTime(timezone=False)),
-                        Column('deleted', Boolean(create_constraint=True, name=None)),
+                        Column('deleted', Boolean(create_constraint=True,
+                                                  name=None)),
                         Column('id', Integer(), primary_key=True),
                         Column('project_id',
                                String(length=255, convert_unicode=True,
                                       assert_unicode=None, unicode_error=None,
                                       _warn_on_bytestring=False),
                                index=True),
                         Column('resource',
                                String(length=255, convert_unicode=True,
                                       assert_unicode=None, unicode_error=None,
                                       _warn_on_bytestring=False)),
                         Column('in_use', Integer(), nullable=False),
                         Column('reserved', Integer(), nullable=False),
                         Column('until_refresh', Integer(), nullable=True),
                         mysql_engine='InnoDB',
                         mysql_charset='utf8',
                         )

    try:
        quota_usages.create()
@ -81,31 +88,37 @@ def upgrade(migrate_engine):
        raise

    reservations = Table('reservations', meta,
                         Column('created_at', DateTime(timezone=False)),
                         Column('updated_at', DateTime(timezone=False)),
                         Column('deleted_at', DateTime(timezone=False)),
-                        Column('deleted', Boolean(create_constraint=True, name=None)),
+                        Column('deleted', Boolean(create_constraint=True,
+                                                  name=None)),
                         Column('id', Integer(), primary_key=True),
                         Column('uuid',
-                               String(length=36, convert_unicode=True,
-                                      assert_unicode=None, unicode_error=None,
-                                      _warn_on_bytestring=False), nullable=False),
-                        Column('usage_id', Integer(), ForeignKey('quota_usages.id'),
-                               nullable=False),
+                               String(length=36,
+                                      convert_unicode=True,
+                                      assert_unicode=None,
+                                      unicode_error=None,
+                                      _warn_on_bytestring=False),
+                               nullable=False),
+                        Column('usage_id',
+                               Integer(),
+                               ForeignKey('quota_usages.id'),
+                               nullable=False),
                         Column('project_id',
                                String(length=255, convert_unicode=True,
                                       assert_unicode=None, unicode_error=None,
                                       _warn_on_bytestring=False),
                                index=True),
                         Column('resource',
                                String(length=255, convert_unicode=True,
                                       assert_unicode=None, unicode_error=None,
                                       _warn_on_bytestring=False)),
                         Column('delta', Integer(), nullable=False),
                         Column('expire', DateTime(timezone=False)),
                         mysql_engine='InnoDB',
                         mysql_charset='utf8',
                         )

    try:
        reservations.create()
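The three migration hunks above apply the same mechanical fix: the long Table()/Column()/String() calls are wrapped one argument per line so every continuation sits at a consistent indent under the opening parenthesis. A minimal sketch of that shape, with an invented table rather than the real quota tables:

# Invented table purely to illustrate the wrapping style used above.
from sqlalchemy import Boolean, Column, DateTime, Integer, MetaData, String, Table

meta = MetaData()

example = Table('example', meta,
                Column('created_at', DateTime(timezone=False)),
                Column('deleted', Boolean(create_constraint=True,
                                          name=None)),
                Column('id', Integer(), primary_key=True),
                Column('name',
                       String(length=255),
                       index=True),
                mysql_engine='InnoDB',
                mysql_charset='utf8')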
@ -29,27 +29,31 @@ def upgrade(migrate_engine):
    # Just for the ForeignKey and column creation to succeed, these are not the
    # actual definitions of tables .
    #
-    volumes = Table('volumes', meta,
-                    Column('id', Integer(), primary_key=True, nullable=False),
-                    mysql_engine='InnoDB'
-                    )
-    snapshots = Table('snapshots', meta,
-                      Column('id', Integer(), primary_key=True, nullable=False),
-                      mysql_engine='InnoDB'
-                      )
+    volumes = Table('volumes',
+                    meta,
+                    Column('id', Integer(),
+                           primary_key=True, nullable=False),
+                    mysql_engine='InnoDB')
+    snapshots = Table('snapshots',
+                      meta,
+                      Column('id', Integer(),
+                             primary_key=True, nullable=False),
+                      mysql_engine='InnoDB')
    # Create new table
-    volume_glance_metadata = Table('volume_glance_metadata', meta,
-            Column('created_at', DateTime(timezone=False)),
-            Column('updated_at', DateTime(timezone=False)),
-            Column('deleted_at', DateTime(timezone=False)),
-            Column('deleted', Boolean(create_constraint=True, name=None)),
-            Column('id', Integer(), primary_key=True, nullable=False),
-            Column('volume_id', String(length=36), ForeignKey('volumes.id')),
-            Column('snapshot_id', String(length=36),
-                   ForeignKey('snapshots.id')),
-            Column('key', String(255)),
-            Column('value', Text),
-            mysql_engine='InnoDB'
+    volume_glance_metadata = Table(
+        'volume_glance_metadata',
+        meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('volume_id', String(length=36), ForeignKey('volumes.id')),
+        Column('snapshot_id', String(length=36),
+               ForeignKey('snapshots.id')),
+        Column('key', String(255)),
+        Column('value', Text),
+        mysql_engine='InnoDB'
    )

    try:
@ -85,7 +85,7 @@ class CinderBase(object):
        return n, getattr(self, n)

    def update(self, values):
-        """Make the model object behave like a dict"""
+        """Make the model object behave like a dict."""
        for k, v in values.iteritems():
            setattr(self, k, v)

@ -159,7 +159,7 @@ class Volume(BASE, CinderBase):


class VolumeMetadata(BASE, CinderBase):
-    """Represents a metadata key/value pair for a volume"""
+    """Represents a metadata key/value pair for a volume."""
    __tablename__ = 'volume_metadata'
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
@ -173,7 +173,7 @@ class VolumeMetadata(BASE, CinderBase):


class VolumeTypes(BASE, CinderBase):
-    """Represent possible volume_types of volumes offered"""
+    """Represent possible volume_types of volumes offered."""
    __tablename__ = "volume_types"
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
@ -187,7 +187,7 @@ class VolumeTypes(BASE, CinderBase):


class VolumeTypeExtraSpecs(BASE, CinderBase):
-    """Represents additional specs as key/value pairs for a volume_type"""
+    """Represents additional specs as key/value pairs for a volume_type."""
    __tablename__ = 'volume_type_extra_specs'
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
@ -206,7 +206,7 @@ class VolumeTypeExtraSpecs(BASE, CinderBase):


class VolumeGlanceMetadata(BASE, CinderBase):
-    """Glance metadata for a bootable volume"""
+    """Glance metadata for a bootable volume."""
    __tablename__ = 'volume_glance_metadata'
    id = Column(Integer, primary_key=True, nullable=False)
    volume_id = Column(String(36), ForeignKey('volumes.id'))
@ -317,7 +317,7 @@ class Snapshot(BASE, CinderBase):


class IscsiTarget(BASE, CinderBase):
-    """Represents an iscsi target for a given host"""
+    """Represents an iscsi target for a given host."""
    __tablename__ = 'iscsi_targets'
    __table_args__ = (schema.UniqueConstraint("target_num", "host"),
                      {'mysql_engine': 'InnoDB'})
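The six model hunks above only add a terminating period to one-line docstrings, which is one of the hacking docstring conventions rather than a pep8 check proper. The class below is invented purely to show the accepted form:

# Invented class; only the docstring punctuation mirrors the hunks above.
class IscsiTargetExample(object):
    """Represents an iscsi target for a given host."""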
@ -138,8 +138,8 @@ def get_engine():
            _ENGINE.connect()
            break
        except OperationalError, e:
-            if (remaining != 'infinite' and remaining == 0) or \
-                    not is_db_connection_error(e.args[0]):
+            if ((remaining != 'infinite' and remaining == 0) or
+                    not is_db_connection_error(e.args[0])):
                raise
    return _ENGINE

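The get_engine() hunk above swaps a backslash-continued condition for one wrapped in parentheses, which the stricter continuation checks handle cleanly. A stand-alone sketch of the same transformation with made-up predicate names:

# Hypothetical predicates standing in for the retry bookkeeping above.
def out_of_retries(remaining):
    return remaining != 'infinite' and remaining == 0


def is_db_connection_error(message):
    return 'gone away' in message


def should_reraise(remaining, message):
    # Parentheses let the second clause continue on the next line without a
    # trailing backslash, matching the style adopted in the hunk above.
    return (out_of_retries(remaining) or
            not is_db_connection_error(message))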
@ -98,8 +98,7 @@ core_opts = [
               help='Directory where cinder binaries are installed'),
    cfg.StrOpt('state_path',
               default='$pybasedir',
-              help="Top-level directory for maintaining cinder's state"),
-]
+              help="Top-level directory for maintaining cinder's state"), ]

debug_opts = [
]
@ -122,8 +121,8 @@ global_opts = [
               help='A list of the glance api servers available to cinder '
                    '([hostname|ip]:port)'),
    cfg.IntOpt('glance_num_retries',
               default=0,
               help='Number retries when downloading an image from glance'),
    cfg.StrOpt('scheduler_topic',
               default='cinder-scheduler',
               help='the topic scheduler nodes listen on'),
@ -217,8 +216,8 @@ global_opts = [
               default=60,
               help='maximum time since last check-in for up service'),
    cfg.StrOpt('volume_api_class',
               default='cinder.volume.api.API',
               help='The full class name of the volume API class to use'),
    cfg.StrOpt('auth_strategy',
               default='noauth',
               help='The strategy to use for auth. Supports noauth, keystone, '
@ -228,7 +227,6 @@ global_opts = [
                help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
    cfg.BoolOpt('secure_delete',
                default=True,
-               help='Whether to perform secure delete'),
-]
+               help='Whether to perform secure delete'), ]

FLAGS.register_opts(global_opts)
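The flags hunks above close each option list with '), ]' on the final entry rather than leaving the ']' dangling on a line of its own, presumably to satisfy pep8 1.3.3's closing-bracket indentation check under the new ignore list. A sketch with invented option names; the import path is also an assumption, since the tree this commit targets bundles its own openstack.common cfg module:

# Option names are invented; only the trailing '), ]' shape mirrors the hunks above.
from oslo_config import cfg

example_opts = [
    cfg.StrOpt('example_driver',
               default='cinder.example.Driver',
               help='Illustrative option'),
    cfg.IntOpt('example_retries',
               default=0,
               help='Illustrative option'), ]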
@ -111,8 +111,8 @@ class GlanceClientWrapper(object):
        retry the request according to FLAGS.glance_num_retries.
        """
        retry_excs = (glanceclient.exc.ServiceUnavailable,
                      glanceclient.exc.InvalidEndpoint,
                      glanceclient.exc.CommunicationError)
        num_attempts = 1 + FLAGS.glance_num_retries

        for attempt in xrange(1, num_attempts + 1):
@ -125,12 +125,14 @@ class GlanceClientWrapper(object):
                port = self.port
                extra = "retrying"
                error_msg = _("Error contacting glance server "
-                              "'%(host)s:%(port)s' for '%(method)s', %(extra)s.")
+                              "'%(host)s:%(port)s' for '%(method)s', "
+                              "%(extra)s.")
                if attempt == num_attempts:
                    extra = 'done trying'
                    LOG.exception(error_msg, locals())
-                    raise exception.GlanceConnectionFailed(
-                        host=host, port=port, reason=str(e))
+                    raise exception.GlanceConnectionFailed(host=host,
+                                                           port=port,
+                                                           reason=str(e))
                LOG.exception(error_msg, locals())
                time.sleep(1)

@ -220,8 +222,8 @@ class GlanceImageService(object):

        return self._translate_from_glance(recv_service_image_meta)

-    def update(self, context, image_id, image_meta, data=None,
-               purge_props=True):
+    def update(self, context, image_id,
+               image_meta, data=None, purge_props=True):
        """Modify the given image with the new data."""
        image_meta = self._translate_to_glance(image_meta)
        image_meta['purge_props'] = purge_props
@ -378,7 +380,7 @@ def _reraise_translated_exception():

def _translate_image_exception(image_id, exc_value):
    if isinstance(exc_value, (glanceclient.exc.Forbidden,
                              glanceclient.exc.Unauthorized)):
        return exception.ImageNotAuthorized(image_id=image_id)
    if isinstance(exc_value, glanceclient.exc.NotFound):
        return exception.ImageNotFound(image_id=image_id)
@ -389,7 +391,7 @@ def _translate_image_exception(image_id, exc_value):

def _translate_plain_exception(exc_value):
    if isinstance(exc_value, (glanceclient.exc.Forbidden,
                              glanceclient.exc.Unauthorized)):
        return exception.NotAuthorized(exc_value)
    if isinstance(exc_value, glanceclient.exc.NotFound):
        return exception.NotFound(exc_value)
@ -419,7 +421,8 @@ def get_remote_image_service(context, image_href):
    try:
        (image_id, glance_host, glance_port) = _parse_image_ref(image_href)
        glance_client = GlanceClientWrapper(context=context,
-                                            host=glance_host, port=glance_port)
+                                            host=glance_host,
+                                            port=glance_port)
    except ValueError:
        raise exception.InvalidImageRef(image_href=image_href)

|
|||||||
"""Pass data back to the scheduler at a periodic interval."""
|
"""Pass data back to the scheduler at a periodic interval."""
|
||||||
if self.last_capabilities:
|
if self.last_capabilities:
|
||||||
LOG.debug(_('Notifying Schedulers of capabilities ...'))
|
LOG.debug(_('Notifying Schedulers of capabilities ...'))
|
||||||
self.scheduler_rpcapi.update_service_capabilities(context,
|
self.scheduler_rpcapi.update_service_capabilities(
|
||||||
self.service_name, self.host, self.last_capabilities)
|
context,
|
||||||
|
self.service_name,
|
||||||
|
self.host,
|
||||||
|
self.last_capabilities)
|
||||||
|
@ -30,8 +30,7 @@ policy_opts = [
               help=_('JSON file representing policy')),
    cfg.StrOpt('policy_default_rule',
               default='default',
-              help=_('Rule checked when requested rule is not found')),
-]
+              help=_('Rule checked when requested rule is not found')), ]

FLAGS = flags.FLAGS
FLAGS.register_opts(policy_opts)
@ -49,8 +49,7 @@ quota_opts = [
               help='number of seconds between subsequent usage refreshes'),
    cfg.StrOpt('quota_driver',
               default='cinder.quota.DbQuotaDriver',
-              help='default driver to use for quota checks'),
-]
+              help='default driver to use for quota checks'), ]

FLAGS = flags.FLAGS
FLAGS.register_opts(quota_opts)
@ -156,9 +155,9 @@ class DbQuotaDriver(object):
                continue

            quotas[resource.name] = dict(
-                limit=project_quotas.get(resource.name, class_quotas.get(
-                    resource.name, resource.default)),
-                )
+                limit=project_quotas.get(resource.name,
+                                         class_quotas.get(resource.name,
+                                                          resource.default)), )

            # Include usages if desired. This is optional because one
            # internal consumer of this interface wants to access the
@ -167,8 +166,7 @@ class DbQuotaDriver(object):
                usage = project_usages.get(resource.name, {})
                quotas[resource.name].update(
                    in_use=usage.get('in_use', 0),
-                    reserved=usage.get('reserved', 0),
-                    )
+                    reserved=usage.get('reserved', 0), )

        return quotas

@ -577,10 +575,10 @@ class QuotaEngine(object):
        """

        return self._driver.get_project_quotas(context, self._resources,
                                               project_id,
                                               quota_class=quota_class,
                                               defaults=defaults,
                                               usages=usages)

    def count(self, context, resource, *args, **kwargs):
        """Count a resource.
@ -729,14 +727,16 @@ class QuotaEngine(object):

def _sync_instances(context, project_id, session):
    return dict(zip(('instances', 'cores', 'ram'),
-                    db.instance_data_get_for_project(
-                        context, project_id, session=session)))
+                    db.instance_data_get_for_project(context,
+                                                     project_id,
+                                                     session=session)))


def _sync_volumes(context, project_id, session):
    return dict(zip(('volumes', 'gigabytes'),
-                    db.volume_data_get_for_project(
-                        context, project_id, session=session)))
+                    db.volume_data_get_for_project(context,
+                                                   project_id,
+                                                   session=session)))


QUOTAS = QuotaEngine()
@ -744,8 +744,7 @@ QUOTAS = QuotaEngine()

resources = [
    ReservableResource('volumes', _sync_volumes, 'quota_volumes'),
-    ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'),
-]
+    ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'), ]


QUOTAS.register_resources(resources)
@ -69,4 +69,4 @@ class ChanceScheduler(driver.Scheduler):

        updated_volume = driver.volume_update_db(context, volume_id, host)
        self.volume_rpcapi.create_volume(context, updated_volume, host,
                                         snapshot_id, image_id)
@ -33,8 +33,7 @@ from cinder.volume import rpcapi as volume_rpcapi
scheduler_driver_opts = [
    cfg.StrOpt('scheduler_host_manager',
               default='cinder.scheduler.host_manager.HostManager',
-              help='The scheduler host manager class to use'),
-]
+              help='The scheduler host manager class to use'), ]

FLAGS = flags.FLAGS
FLAGS.register_opts(scheduler_driver_opts)
@ -55,7 +54,7 @@ class Scheduler(object):

    def __init__(self):
        self.host_manager = importutils.import_object(
            FLAGS.scheduler_host_manager)
        self.volume_rpcapi = volume_rpcapi.VolumeAPI()

    def get_host_list(self):
@ -70,7 +69,8 @@ class Scheduler(object):
    def update_service_capabilities(self, service_name, host, capabilities):
        """Process a capability update from a service node."""
        self.host_manager.update_service_capabilities(service_name,
-                                                      host, capabilities)
+                                                      host,
+                                                      capabilities)

    def hosts_up(self, context, topic):
        """Return the list of hosts that have a running service for topic."""
@ -36,16 +36,17 @@ from cinder.openstack.common.notifier import api as notifier

LOG = logging.getLogger(__name__)

-scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
-        default='cinder.scheduler.simple.SimpleScheduler',
-        help='Default driver to use for the scheduler')
+scheduler_driver_opt = cfg.StrOpt(
+    'scheduler_driver',
+    default='cinder.scheduler.simple.SimpleScheduler',
+    help='Default driver to use for the scheduler')

FLAGS = flags.FLAGS
FLAGS.register_opt(scheduler_driver_opt)


class SchedulerManager(manager.Manager):
-    """Chooses a host to create volumes"""
+    """Chooses a host to create volumes."""

    RPC_API_VERSION = '1.2'

@ -64,12 +65,13 @@ class SchedulerManager(manager.Manager):
        return self.driver.get_service_capabilities()

    def update_service_capabilities(self, context, service_name=None,
                                    host=None, capabilities=None, **kwargs):
        """Process a capability update from a service node."""
        if capabilities is None:
            capabilities = {}
-        self.driver.update_service_capabilities(service_name, host,
-                                                capabilities)
+        self.driver.update_service_capabilities(service_name,
+                                                host,
+                                                capabilities)

    def create_volume(self, context, topic, volume_id, snapshot_id=None,
                      image_id=None, request_spec=None,
@ -86,11 +88,12 @@ class SchedulerManager(manager.Manager):
        volume_properties = {'size': size,
                             'availability_zone': availability_zone,
                             'volume_type_id': volume_type_id}
-        request_spec.update({'volume_id': volume_id,
-                             'snapshot_id': snapshot_id,
-                             'image_id': image_id,
-                             'volume_properties': volume_properties,
-                             'volume_type': dict(vol_type).iteritems()})
+        request_spec.update(
+            {'volume_id': volume_id,
+             'snapshot_id': snapshot_id,
+             'image_id': image_id,
+             'volume_properties': volume_properties,
+             'volume_type': dict(vol_type).iteritems()})

        self.driver.schedule_create_volume(context, request_spec,
                                           filter_properties)
@ -39,23 +39,26 @@ class SchedulerAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
|
|||||||
RPC_API_VERSION = '1.0'
|
RPC_API_VERSION = '1.0'
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(SchedulerAPI, self).__init__(topic=FLAGS.scheduler_topic,
|
super(SchedulerAPI, self).__init__(
|
||||||
default_version=self.RPC_API_VERSION)
|
topic=FLAGS.scheduler_topic,
|
||||||
|
default_version=self.RPC_API_VERSION)
|
||||||
|
|
||||||
def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
|
def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
|
||||||
image_id=None, request_spec=None,
|
image_id=None, request_spec=None,
|
||||||
filter_properties=None):
|
filter_properties=None):
|
||||||
return self.cast(ctxt, self.make_msg('create_volume',
|
return self.cast(ctxt, self.make_msg(
|
||||||
topic=topic,
|
'create_volume',
|
||||||
volume_id=volume_id,
|
topic=topic,
|
||||||
snapshot_id=snapshot_id,
|
volume_id=volume_id,
|
||||||
image_id=image_id,
|
snapshot_id=snapshot_id,
|
||||||
request_spec=request_spec,
|
image_id=image_id,
|
||||||
filter_properties=filter_properties),
|
request_spec=request_spec,
|
||||||
version='1.2')
|
filter_properties=filter_properties),
|
||||||
|
version='1.2')
|
||||||
|
|
||||||
def update_service_capabilities(self, ctxt, service_name, host,
|
def update_service_capabilities(self, ctxt,
|
||||||
capabilities):
|
service_name, host,
|
||||||
|
capabilities):
|
||||||
self.fanout_cast(ctxt, self.make_msg('update_service_capabilities',
|
self.fanout_cast(ctxt, self.make_msg('update_service_capabilities',
|
||||||
service_name=service_name, host=host,
|
service_name=service_name, host=host,
|
||||||
capabilities=capabilities))
|
capabilities=capabilities))
|
||||||
|
@@ -33,8 +33,7 @@ from cinder import utils
simple_scheduler_opts = [
cfg.IntOpt("max_gigabytes",
default=10000,
-help="maximum number of volume gigabytes to allow per host"),
-]
+help="maximum number of volume gigabytes to allow per host"), ]

FLAGS = flags.FLAGS
FLAGS.register_opts(simple_scheduler_opts)
@@ -63,10 +62,11 @@ class SimpleScheduler(chance.ChanceScheduler):
if not utils.service_is_up(service):
raise exception.WillNotSchedule(host=host)
updated_volume = driver.volume_update_db(context, volume_id, host)
-self.volume_rpcapi.create_volume(context, updated_volume,
-host,
-snapshot_id,
-image_id)
+self.volume_rpcapi.create_volume(context,
+updated_volume,
+host,
+snapshot_id,
+image_id)
return None

results = db.service_get_all_volume_sorted(elevated)
@@ -81,10 +81,11 @@ class SimpleScheduler(chance.ChanceScheduler):
if utils.service_is_up(service) and not service['disabled']:
updated_volume = driver.volume_update_db(context, volume_id,
service['host'])
-self.volume_rpcapi.create_volume(context, updated_volume,
-service['host'],
-snapshot_id,
-image_id)
+self.volume_rpcapi.create_volume(context,
+updated_volume,
+service['host'],
+snapshot_id,
+image_id)
return None
msg = _("Is the appropriate service running?")
raise exception.NoValidHost(reason=msg)
@@ -59,8 +59,7 @@ service_opts = [
help='IP address for OpenStack Volume API to listen'),
cfg.IntOpt('osapi_volume_listen_port',
default=8776,
-help='port for os volume api to listen'),
-]
+help='port for os volume api to listen'), ]

FLAGS = flags.FLAGS
FLAGS.register_opts(service_opts)
@@ -305,7 +304,7 @@ class Service(object):
state_catalog['availability_zone'] = zone

db.service_update(ctxt,
self.service_id, state_catalog)

# TODO(termie): make this pattern be more elegant.
if getattr(self, 'model_disconnected', False):
@@ -46,8 +46,7 @@ test_opts = [
help='File name of clean sqlite db'),
cfg.BoolOpt('fake_tests',
default=True,
-help='should we use everything for testing'),
-]
+help='should we use everything for testing'), ]

FLAGS = flags.FLAGS
FLAGS.register_opts(test_opts)
@@ -131,8 +131,10 @@ class _Win32Colorizer(object):
"""
def __init__(self, stream):
import win32console as win
-red, green, blue, bold = (win.FOREGROUND_RED, win.FOREGROUND_GREEN,
-win.FOREGROUND_BLUE, win.FOREGROUND_INTENSITY)
+red, green, blue, bold = (win.FOREGROUND_RED,
+win.FOREGROUND_GREEN,
+win.FOREGROUND_BLUE,
+win.FOREGROUND_INTENSITY)
self.stream = stream
self.screenBuffer = win.GetStdHandle(win.STD_OUT_HANDLE)
self._colors = {
@@ -143,8 +145,7 @@ class _Win32Colorizer(object):
'yellow': red | green | bold,
'magenta': red | blue | bold,
'cyan': green | blue | bold,
-'white': red | green | blue | bold
-}
+'white': red | green | blue | bold}

def supported(cls, stream=sys.stdout):
try:
@@ -314,10 +315,10 @@ class CinderTestRunner(core.TextTestRunner):

def _makeResult(self):
return CinderTestResult(self.stream,
self.descriptions,
self.verbosity,
self.config,
show_elapsed=self.show_elapsed)

def _writeSlowTests(self, result_):
# Pare out 'fast' tests
@@ -359,9 +360,9 @@ def run():
plugins=core.DefaultPluginManager())

runner = CinderTestRunner(stream=c.stream,
verbosity=c.verbosity,
config=c,
show_elapsed=not hide_elapsed)
sys.exit(not core.run(config=c, testRunner=runner, argv=argv))

@@ -33,17 +33,15 @@ UUID2 = '00000000-0000-0000-0000-000000000002'


def _get_default_snapshot_param():
-return {
-'id': UUID1,
-'volume_id': 12,
-'status': 'available',
-'volume_size': 100,
-'created_at': None,
-'display_name': 'Default name',
-'display_description': 'Default description',
-'project_id': 'fake',
-'progress': '0%'
-}
+return {'id': UUID1,
+'volume_id': 12,
+'status': 'available',
+'volume_size': 100,
+'created_at': None,
+'display_name': 'Default name',
+'display_description': 'Default description',
+'project_id': 'fake',
+'progress': '0%'}


def fake_snapshot_get(self, context, snapshot_id):
@@ -80,7 +78,7 @@ class ExtendedSnapshotAttributesTest(test.TestCase):

def assertSnapshotAttributes(self, snapshot, project_id, progress):
self.assertEqual(snapshot.get('%sproject_id' % self.prefix),
project_id)
self.assertEqual(snapshot.get('%sprogress' % self.prefix), progress)

def test_show(self):
@@ -89,8 +87,8 @@ class ExtendedSnapshotAttributesTest(test.TestCase):

self.assertEqual(res.status_int, 200)
self.assertSnapshotAttributes(self._get_snapshot(res.body),
project_id='fake',
progress='0%')

def test_detail(self):
url = '/v1/fake/snapshots/detail'
@@ -99,8 +97,8 @@ class ExtendedSnapshotAttributesTest(test.TestCase):
self.assertEqual(res.status_int, 200)
for i, snapshot in enumerate(self._get_snapshots(res.body)):
self.assertSnapshotAttributes(snapshot,
project_id='fake',
progress='0%')

def test_no_instance_passthrough_404(self):

@@ -59,7 +59,7 @@ class VolumeActionsTest(test.TestCase):
app = fakes.wsgi_app()
for _action in self._actions:
req = webob.Request.blank('/v1/fake/volumes/%s/action' %
self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({_action: None})
req.content_type = 'application/json'
@@ -153,15 +153,15 @@ class VolumeImageActionsTest(test.TestCase):
req = fakes.HTTPRequest.blank('/v1/tenant1/volumes/%s/action' % id)
res_dict = self.controller._volume_upload_image(req, id, body)
expected = {'os-volume_upload_image': {'id': id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'status': 'uploading',
'display_description': 'displaydesc',
'size': 1,
'volume_type': {'name': 'vol_type_name'},
'image_id': 1,
'container_format': 'bare',
'disk_format': 'raw',
'image_name': 'image_name'}}
self.assertDictMatch(res_dict, expected)

def test_copy_volume_to_image_volumenotfound(self):
@@ -185,7 +185,7 @@ class VolumeImageActionsTest(test.TestCase):

def test_copy_volume_to_image_invalidvolume(self):
def stub_upload_volume_to_image_service_raise(self, context, volume,
metadata, force):
raise exception.InvalidVolume
self.stubs.Set(volume_api.API,
"copy_volume_to_image",
@@ -206,7 +206,7 @@ class VolumeImageActionsTest(test.TestCase):

def test_copy_volume_to_image_valueerror(self):
def stub_upload_volume_to_image_service_raise(self, context, volume,
metadata, force):
raise ValueError
self.stubs.Set(volume_api.API,
"copy_volume_to_image",
@@ -227,7 +227,7 @@ class VolumeImageActionsTest(test.TestCase):

def test_copy_volume_to_image_remoteerror(self):
def stub_upload_volume_to_image_service_raise(self, context, volume,
metadata, force):
raise rpc_common.RemoteError
self.stubs.Set(volume_api.API,
"copy_volume_to_image",
@@ -121,7 +121,7 @@ class VolumeTenantAttributeTest(test.TestCase):
res = req.get_response(app())
vol = etree.XML(res.body)
tenant_key = ('{http://docs.openstack.org/volume/ext/'
'volume_tenant_attribute/api/v1}tenant_id')
self.assertEqual(vol.get(tenant_key), PROJECT_ID)

def test_list_volumes_detail_xml(self):
@@ -133,5 +133,5 @@ class VolumeTenantAttributeTest(test.TestCase):
res = req.get_response(app())
vol = list(etree.XML(res.body))[0]
tenant_key = ('{http://docs.openstack.org/volume/ext/'
'volume_tenant_attribute/api/v1}tenant_id')
self.assertEqual(vol.get(tenant_key), PROJECT_ID)
@@ -74,7 +74,7 @@ class Foxinsocks(extensions.ExtensionDescriptor):
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('foxnsocks',
FoxInSocksController())
resources.append(resource)
return resources

@@ -84,8 +84,7 @@ class Foxinsocks(extensions.ExtensionDescriptor):
extension_set = [
(FoxInSocksServerControllerExtension, 'servers'),
(FoxInSocksFlavorGooseControllerExtension, 'flavors'),
-(FoxInSocksFlavorBandsControllerExtension, 'flavors'),
-]
+(FoxInSocksFlavorBandsControllerExtension, 'flavors'), ]
for klass, collection in extension_set:
controller = klass()
ext = extensions.ControllerExtension(self, collection, controller)
@@ -69,7 +69,7 @@ class TestFaults(test.TestCase):
for request in requests:
exc = webob.exc.HTTPRequestEntityTooLarge
fault = wsgi.Fault(exc(explanation='sorry',
headers={'Retry-After': 4}))
response = request.get_response(fault)

expected = {
@@ -62,7 +62,7 @@ def fake_wsgi(self, req):


def wsgi_app(inner_app_v1=None, fake_auth=True, fake_auth_context=None,
use_no_auth=False, ext_mgr=None):
if not inner_app_v1:
inner_app_v1 = router.APIRouter(ext_mgr)

@@ -72,13 +72,13 @@ def wsgi_app(inner_app_v1=None, fake_auth=True, fake_auth_context=None,
else:
ctxt = context.RequestContext('fake', 'fake', auth_token=True)
api_v1 = fault.FaultWrapper(auth.InjectContext(ctxt,
inner_app_v1))
elif use_no_auth:
api_v1 = fault.FaultWrapper(auth.NoAuthMiddleware(
limits.RateLimitingMiddleware(inner_app_v1)))
else:
api_v1 = fault.FaultWrapper(auth.AuthMiddleware(
limits.RateLimitingMiddleware(inner_app_v1)))

mapper = urlmap.URLMap()
mapper['/v1'] = api_v1
@@ -125,8 +125,10 @@ class HTTPRequest(webob.Request):
kwargs['base_url'] = 'http://localhost/v1'
use_admin_context = kwargs.pop('use_admin_context', False)
out = webob.Request.blank(*args, **kwargs)
-out.environ['cinder.context'] = FakeRequestContext('fake_user', 'fake',
-is_admin=use_admin_context)
+out.environ['cinder.context'] = FakeRequestContext(
+'fake_user',
+'fake',
+is_admin=use_admin_context)
return out

@@ -254,16 +256,14 @@ def stub_volume_get_all_by_project(self, context, search_opts=None):


def stub_snapshot(id, **kwargs):
-snapshot = {
-'id': id,
-'volume_id': 12,
-'status': 'available',
-'volume_size': 100,
-'created_at': None,
-'display_name': 'Default name',
-'display_description': 'Default description',
-'project_id': 'fake'
-}
+snapshot = {'id': id,
+'volume_id': 12,
+'status': 'available',
+'volume_size': 100,
+'created_at': None,
+'display_name': 'Default name',
+'display_description': 'Default description',
+'project_id': 'fake'}

snapshot.update(kwargs)
return snapshot
@@ -441,10 +441,9 @@ class ResourceTest(test.TestCase):

extended = ControllerExtended()
resource.register_actions(extended)
-self.assertEqual({
-'fooAction': extended._action_foo,
-'barAction': extended._action_bar,
-}, resource.wsgi_actions)
+self.assertEqual({'fooAction': extended._action_foo,
+'barAction': extended._action_bar, },
+resource.wsgi_actions)

def test_register_extensions(self):
class Controller(object):
@@ -15,17 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.

-from lxml import etree
+import datetime
import webob.exc

from cinder.api.openstack.volume.contrib import hosts as os_hosts
from cinder import context
-import datetime
from cinder import db
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import test
+from lxml import etree


FLAGS = flags.FLAGS
@@ -34,18 +34,18 @@ created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099)
curr_time = timeutils.utcnow()

SERVICE_LIST = [
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'},
{'created_at': created_time, 'updated_at': curr_time,
'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0,
'availability_zone': 'cinder'}]

LIST_RESPONSE = [{'service-status': 'available', 'service': 'cinder-volume',
'zone': 'cinder', 'service-state': 'enabled',
@@ -97,7 +97,7 @@ class HostTestCase(test.TestCase):

cinder_hosts = os_hosts._list_hosts(self.req, 'cinder-volume')
expected = [host for host in LIST_RESPONSE
if host['service'] == 'cinder-volume']
self.assertEqual(cinder_hosts, expected)

def test_list_hosts_with_zone(self):
@@ -107,19 +107,22 @@ class HostTestCase(test.TestCase):

def test_bad_status_value(self):
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body={'status': 'bad'})
-self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
-self.req, 'test.host.1', body={'status': 'disablabc'})
+self.assertRaises(webob.exc.HTTPBadRequest,
+self.controller.update,
+self.req,
+'test.host.1',
+body={'status': 'disablabc'})

def test_bad_update_key(self):
bad_body = {'crazy': 'bad'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body=bad_body)

def test_bad_update_key_and_correct_udpate_key(self):
bad_body = {'status': 'disable', 'crazy': 'bad'}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'test.host.1', body=bad_body)

def test_good_udpate_keys(self):
body = {'status': 'disable'}
@@ -127,8 +130,11 @@ class HostTestCase(test.TestCase):
self.req, 'test.host.1', body=body)

def test_bad_host(self):
-self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
-self.req, 'bogus_host_name', body={'disabled': 0})
+self.assertRaises(webob.exc.HTTPNotFound,
+self.controller.update,
+self.req,
+'bogus_host_name',
+body={'disabled': 0})

def test_show_forbidden(self):
self.req.environ['cinder.context'].is_admin = False
@@ -44,10 +44,7 @@ class ExtensionControllerTest(ExtensionTestCase):

def setUp(self):
super(ExtensionControllerTest, self).setUp()
-self.ext_list = [
-"TypesManage",
-"TypesExtraSpecs",
-]
+self.ext_list = ["TypesManage", "TypesExtraSpecs", ]
self.ext_list.sort()

def test_list_extensions_json(self):
@@ -70,15 +67,13 @@ class ExtensionControllerTest(ExtensionTestCase):
# Make sure that at least Fox in Sox is correct.
(fox_ext, ) = [
x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
-self.assertEqual(fox_ext, {
-'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
+self.assertEqual(
+fox_ext, {'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
'name': 'Fox In Socks',
'updated': '2011-01-22T13:25:27-06:00',
'description': 'The Fox In Socks Extension',
'alias': 'FOXNSOX',
-'links': []
-},
-)
+'links': []}, )

for ext in data['extensions']:
url = '/fake/extensions/%s' % ext['alias']
@@ -94,13 +89,14 @@ class ExtensionControllerTest(ExtensionTestCase):
self.assertEqual(200, response.status_int)

data = jsonutils.loads(response.body)
-self.assertEqual(data['extension'], {
-"namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
-"name": "Fox In Socks",
-"updated": "2011-01-22T13:25:27-06:00",
-"description": "The Fox In Socks Extension",
-"alias": "FOXNSOX",
-"links": []})
+self.assertEqual(
+data['extension'],
+{"namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
+"name": "Fox In Socks",
+"updated": "2011-01-22T13:25:27-06:00",
+"description": "The Fox In Socks Extension",
+"alias": "FOXNSOX",
+"links": []})

def test_get_non_existing_extension_json(self):
app = router.APIRouter()
@@ -125,10 +121,12 @@ class ExtensionControllerTest(ExtensionTestCase):
# Make sure that at least Fox in Sox is correct.
(fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX']
self.assertEqual(fox_ext.get('name'), 'Fox In Socks')
-self.assertEqual(fox_ext.get('namespace'),
+self.assertEqual(
+fox_ext.get('namespace'),
'http://www.fox.in.socks/api/ext/pie/v1.0')
self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00')
-self.assertEqual(fox_ext.findtext('{0}description'.format(NS)),
+self.assertEqual(
+fox_ext.findtext('{0}description'.format(NS)),
'The Fox In Socks Extension')

xmlutil.validate_schema(root, 'extensions')
@@ -145,10 +143,12 @@ class ExtensionControllerTest(ExtensionTestCase):
self.assertEqual(root.tag.split('extension')[0], NS)
self.assertEqual(root.get('alias'), 'FOXNSOX')
self.assertEqual(root.get('name'), 'Fox In Socks')
-self.assertEqual(root.get('namespace'),
+self.assertEqual(
+root.get('namespace'),
'http://www.fox.in.socks/api/ext/pie/v1.0')
self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00')
-self.assertEqual(root.findtext('{0}description'.format(NS)),
+self.assertEqual(
+root.findtext('{0}description'.format(NS)),
'The Fox In Socks Extension')

xmlutil.validate_schema(root, 'extension')
@@ -22,17 +22,11 @@ from cinder import test


class SelectorTest(test.TestCase):
-obj_for_test = {
-'test': {
-'name': 'test',
-'values': [1, 2, 3],
-'attrs': {
-'foo': 1,
-'bar': 2,
-'baz': 3,
-},
-},
-}
+obj_for_test = {'test': {'name': 'test',
+'values': [1, 2, 3],
+'attrs': {'foo': 1,
+'bar': 2,
+'baz': 3, }, }, }

def test_empty_selector(self):
sel = xmlutil.Selector()
@@ -217,11 +211,9 @@ class TemplateElementTest(test.TestCase):
self.assertEqual(len(elem), 0)

# Create a few children
-children = [
-xmlutil.TemplateElement('child1'),
-xmlutil.TemplateElement('child2'),
-xmlutil.TemplateElement('child3'),
-]
+children = [xmlutil.TemplateElement('child1'),
+xmlutil.TemplateElement('child2'),
+xmlutil.TemplateElement('child3'), ]

# Extend the parent by those children
elem.extend(children)
@@ -234,10 +226,8 @@ class TemplateElementTest(test.TestCase):
self.assertEqual(elem[children[idx].tag], children[idx])

# Ensure that multiple children of the same name are rejected
-children2 = [
-xmlutil.TemplateElement('child4'),
-xmlutil.TemplateElement('child1'),
-]
+children2 = [xmlutil.TemplateElement('child4'),
+xmlutil.TemplateElement('child1'), ]
self.assertRaises(KeyError, elem.extend, children2)

# Also ensure that child4 was not added
@@ -252,11 +242,9 @@ class TemplateElementTest(test.TestCase):
self.assertEqual(len(elem), 0)

# Create a few children
-children = [
-xmlutil.TemplateElement('child1'),
-xmlutil.TemplateElement('child2'),
-xmlutil.TemplateElement('child3'),
-]
+children = [xmlutil.TemplateElement('child1'),
+xmlutil.TemplateElement('child2'),
+xmlutil.TemplateElement('child3'), ]

# Extend the parent by those children
elem.extend(children)
@@ -287,11 +275,9 @@ class TemplateElementTest(test.TestCase):
self.assertEqual(len(elem), 0)

# Create a few children
-children = [
-xmlutil.TemplateElement('child1'),
-xmlutil.TemplateElement('child2'),
-xmlutil.TemplateElement('child3'),
-]
+children = [xmlutil.TemplateElement('child1'),
+xmlutil.TemplateElement('child2'),
+xmlutil.TemplateElement('child3'), ]

# Extend the parent by those children
elem.extend(children)
@@ -384,10 +370,8 @@ class TemplateElementTest(test.TestCase):
master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])

# Create a couple of slave template element
-slave_elems = [
-xmlutil.TemplateElement('test', attr2=attrs['attr2']),
-xmlutil.TemplateElement('test', attr3=attrs['attr3']),
-]
+slave_elems = [xmlutil.TemplateElement('test', attr2=attrs['attr2']),
+xmlutil.TemplateElement('test', attr3=attrs['attr3']), ]

# Try the render
elem = master_elem._render(None, None, slave_elems, None)
@@ -589,22 +573,13 @@ class TemplateTest(test.TestCase):

def test__serialize(self):
# Our test object to serialize
-obj = {
-'test': {
-'name': 'foobar',
-'values': [1, 2, 3, 4],
-'attrs': {
-'a': 1,
-'b': 2,
-'c': 3,
-'d': 4,
-},
-'image': {
-'name': 'image_foobar',
-'id': 42,
-},
-},
-}
+obj = {'test': {'name': 'foobar',
+'values': [1, 2, 3, 4],
+'attrs': {'a': 1,
+'b': 2,
+'c': 3,
+'d': 4, },
+'image': {'name': 'image_foobar', 'id': 42, }, }, }

# Set up our master template
root = xmlutil.TemplateElement('test', selector='test',
@@ -159,10 +159,8 @@ class LimitsControllerTest(BaseLimitTestSuite):
},

],
-"absolute": {
-"maxTotalVolumeGigabytes": 512,
-"maxTotalVolumes": 5,
-},
+"absolute": {"maxTotalVolumeGigabytes": 512,
+"maxTotalVolumes": 5, },
},
}
body = jsonutils.loads(response.body)
@@ -776,26 +774,26 @@ class LimitsViewBuilderTest(test.TestCase):
"injected_file_content_bytes": 5}

def test_build_limits(self):
-expected_limits = {"limits": {
-"rate": [{
-"uri": "*",
+tdate = "2011-07-21T18:17:06Z"
+expected_limits = \
+{"limits": {"rate": [{"uri": "*",
"regex": ".*",
"limit": [{"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
-"next-available": "2011-07-21T18:17:06Z"}]},
+"next-available": tdate}]},
{"uri": "*/volumes",
"regex": "^/volumes",
"limit": [{"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
-"next-available": "2011-07-21T18:17:06Z"}]}],
+"next-available": tdate}]}],
"absolute": {"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 5}}}

output = self.view_builder.build(self.rate_limits,
self.absolute_limits)
@@ -827,27 +825,27 @@ class LimitsXMLSerializationTest(test.TestCase):
serializer = limits.LimitsTemplate()
fixture = {
"limits": {
"rate": [{
"uri": "*",
"regex": ".*",
"limit": [{
"value": 10,
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
"next-available": "2011-12-15T22:42:45Z"}]},
{"uri": "*/servers",
"regex": "^/servers",
"limit": [{
"value": 50,
"verb": "POST",
"remaining": 10,
"unit": "DAY",
"next-available": "2011-12-15T22:42:45Z"}]}],
"absolute": {"maxServerMeta": 1,
"maxImageMeta": 1,
"maxPersonality": 5,
"maxPersonalitySize": 10240}}}

output = serializer.serialize(fixture)
root = etree.XML(output)
@@ -873,8 +871,9 @@ class LimitsXMLSerializationTest(test.TestCase):
for j, limit in enumerate(rate_limits):
for key in ['verb', 'value', 'remaining', 'unit',
'next-available']:
-self.assertEqual(limit.get(key),
-str(fixture['limits']['rate'][i]['limit'][j][key]))
+self.assertEqual(
+limit.get(key),
+str(fixture['limits']['rate'][i]['limit'][j][key]))

def test_index_no_limits(self):
serializer = limits.LimitsTemplate()
@@ -36,15 +36,13 @@ INVALID_UUID = '00000000-0000-0000-0000-000000000002'


def _get_default_snapshot_param():
-return {
-'id': UUID,
-'volume_id': 12,
-'status': 'available',
-'volume_size': 100,
-'created_at': None,
-'display_name': 'Default name',
-'display_description': 'Default description',
-}
+return {'id': UUID,
+'volume_id': 12,
+'status': 'available',
+'volume_size': 100,
+'created_at': None,
+'display_name': 'Default name',
+'display_description': 'Default description', }


def stub_snapshot_create(self, context, volume_id, name, description):
@@ -81,47 +79,48 @@ class SnapshotApiTest(test.TestCase):
self.stubs.Set(db, 'snapshot_get_all_by_project',
fakes.stub_snapshot_get_all_by_project)
self.stubs.Set(db, 'snapshot_get_all',
fakes.stub_snapshot_get_all)

def test_snapshot_create(self):
self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create)
self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
snapshot = {"volume_id": '12',
"force": False,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v1/snapshots')
resp_dict = self.controller.create(req, body)

self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['display_name'],
snapshot['display_name'])
self.assertEqual(resp_dict['snapshot']['display_description'],
snapshot['display_description'])

def test_snapshot_create_force(self):
-self.stubs.Set(volume.api.API, "create_snapshot_force",
-stub_snapshot_create)
+self.stubs.Set(volume.api.API,
+"create_snapshot_force",
+stub_snapshot_create)
self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get)
snapshot = {"volume_id": '12',
"force": True,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v1/snapshots')
resp_dict = self.controller.create(req, body)

self.assertTrue('snapshot' in resp_dict)
self.assertEqual(resp_dict['snapshot']['display_name'],
snapshot['display_name'])
self.assertEqual(resp_dict['snapshot']['display_description'],
snapshot['display_description'])

snapshot = {"volume_id": "12",
"force": "**&&^^%%$$##@@",
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = fakes.HTTPRequest.blank('/v1/snapshots')
self.assertRaises(exception.InvalidParameterValue,
@@ -133,9 +132,7 @@ class SnapshotApiTest(test.TestCase):
self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get)
self.stubs.Set(volume.api.API, "update_snapshot",
fakes.stub_snapshot_update)
-updates = {
-"display_name": "Updated Test Name",
-}
+updates = {"display_name": "Updated Test Name", }
body = {"snapshot": updates}
req = fakes.HTTPRequest.blank('/v1/snapshots/%s' % UUID)
res_dict = self.controller.update(req, UUID, body)
@@ -207,8 +204,9 @@ class SnapshotApiTest(test.TestCase):
snapshot_id)

def test_snapshot_detail(self):
-self.stubs.Set(volume.api.API, "get_all_snapshots",
-stub_snapshot_get_all)
+self.stubs.Set(volume.api.API,
+"get_all_snapshots",
+stub_snapshot_get_all)
req = fakes.HTTPRequest.blank('/v1/snapshots/detail')
resp_dict = self.controller.detail(req)

@@ -350,8 +348,7 @@ class SnapshotSerializerTest(test.TestCase):
created_at=datetime.datetime.now(),
display_name='snap_name',
display_description='snap_desc',
-volume_id='vol_id',
-)
+volume_id='vol_id', )
text = serializer.serialize(dict(snapshot=raw_snapshot))

print text
@@ -361,24 +358,20 @@ class SnapshotSerializerTest(test.TestCase):

def test_snapshot_index_detail_serializer(self):
serializer = snapshots.SnapshotsTemplate()
-raw_snapshots = [dict(
-id='snap1_id',
-status='snap1_status',
-size=1024,
-created_at=datetime.datetime.now(),
-display_name='snap1_name',
-display_description='snap1_desc',
-volume_id='vol1_id',
-),
-dict(
-id='snap2_id',
-status='snap2_status',
-size=1024,
-created_at=datetime.datetime.now(),
-display_name='snap2_name',
-display_description='snap2_desc',
-volume_id='vol2_id',
-)]
+raw_snapshots = [dict(id='snap1_id',
+status='snap1_status',
+size=1024,
+created_at=datetime.datetime.now(),
+display_name='snap1_name',
+display_description='snap1_desc',
+volume_id='vol1_id', ),
+dict(id='snap2_id',
+status='snap2_status',
+size=1024,
+created_at=datetime.datetime.now(),
+display_name='snap2_name',
+display_description='snap2_desc',
+volume_id='vol2_id', )]
text = serializer.serialize(dict(snapshots=raw_snapshots))

print text
@ -40,15 +40,13 @@ def stub_snapshot_get(self, context, snapshot_id):
|
|||||||
if snapshot_id != TEST_SNAPSHOT_UUID:
|
if snapshot_id != TEST_SNAPSHOT_UUID:
|
||||||
raise exception.NotFound
|
raise exception.NotFound
|
||||||
|
|
||||||
return {
|
return {'id': snapshot_id,
|
||||||
'id': snapshot_id,
|
|
||||||
'volume_id': 12,
|
'volume_id': 12,
|
||||||
'status': 'available',
|
'status': 'available',
|
||||||
'volume_size': 100,
|
'volume_size': 100,
|
||||||
'created_at': None,
|
'created_at': None,
|
||||||
'display_name': 'Default name',
|
'display_name': 'Default name',
|
||||||
'display_description': 'Default description',
|
'display_description': 'Default description', }
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class VolumeApiTest(test.TestCase):
|
class VolumeApiTest(test.TestCase):
|
||||||
@ -89,7 +87,7 @@ class VolumeApiTest(test.TestCase):
|
|||||||
'metadata': {},
|
'metadata': {},
|
||||||
'id': '1',
|
'id': '1',
|
||||||
'created_at': datetime.datetime(1, 1, 1,
|
'created_at': datetime.datetime(1, 1, 1,
|
||||||
1, 1, 1),
|
1, 1, 1),
|
||||||
'size': 100}}
|
'size': 100}}
|
||||||
self.assertEqual(res_dict, expected)
|
self.assertEqual(res_dict, expected)
|
||||||
|
|
||||||
@ -105,8 +103,7 @@ class VolumeApiTest(test.TestCase):
|
|||||||
"display_name": "Volume Test Name",
|
"display_name": "Volume Test Name",
|
||||||
"display_description": "Volume Test Desc",
|
"display_description": "Volume Test Desc",
|
||||||
"availability_zone": "zone1:host1",
|
"availability_zone": "zone1:host1",
|
||||||
"volume_type": db_vol_type['name'],
|
"volume_type": db_vol_type['name'], }
|
||||||
}
|
|
||||||
body = {"volume": vol}
|
body = {"volume": vol}
|
||||||
req = fakes.HTTPRequest.blank('/v1/volumes')
|
req = fakes.HTTPRequest.blank('/v1/volumes')
|
||||||
res_dict = self.controller.create(req, body)
|
res_dict = self.controller.create(req, body)
|
||||||
@ -128,28 +125,29 @@ class VolumeApiTest(test.TestCase):
|
|||||||
def test_volume_create_with_image_id(self):
|
def test_volume_create_with_image_id(self):
|
||||||
self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
|
self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
|
||||||
self.ext_mgr.extensions = {'os-image-create': 'fake'}
|
self.ext_mgr.extensions = {'os-image-create': 'fake'}
|
||||||
|
test_id = "c905cedb-7281-47e4-8a62-f26bc5fc4c77"
|
||||||
vol = {"size": '1',
|
vol = {"size": '1',
|
||||||
"display_name": "Volume Test Name",
|
"display_name": "Volume Test Name",
|
||||||
"display_description": "Volume Test Desc",
|
"display_description": "Volume Test Desc",
|
||||||
"availability_zone": "nova",
|
"availability_zone": "nova",
|
||||||
"imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'}
|
"imageRef": test_id}
|
||||||
expected = {'volume': {'status': 'fakestatus',
|
expected = {'volume': {'status': 'fakestatus',
|
||||||
'display_description': 'Volume Test Desc',
|
'display_description': 'Volume Test Desc',
|
||||||
'availability_zone': 'nova',
|
'availability_zone': 'nova',
|
||||||
'display_name': 'Volume Test Name',
|
'display_name': 'Volume Test Name',
|
||||||
'attachments': [{'device': '/',
|
'attachments': [{'device': '/',
|
||||||
'server_id': 'fakeuuid',
|
'server_id': 'fakeuuid',
|
||||||
'id': '1',
|
'id': '1',
|
||||||
'volume_id': '1'}],
|
'volume_id': '1'}],
|
||||||
'bootable': 'false',
|
'bootable': 'false',
|
||||||
'volume_type': 'vol_type_name',
|
'volume_type': 'vol_type_name',
|
||||||
'image_id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
|
'image_id': test_id,
|
||||||
'snapshot_id': None,
|
'snapshot_id': None,
|
||||||
'metadata': {},
|
'metadata': {},
|
||||||
'id': '1',
|
'id': '1',
|
||||||
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
|
'created_at': datetime.datetime(1, 1, 1,
|
||||||
'size': '1'}
|
1, 1, 1),
|
||||||
}
|
'size': '1'}}
|
||||||
body = {"volume": vol}
|
body = {"volume": vol}
|
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)

@@ -160,11 +158,11 @@ class VolumeApiTest(test.TestCase):
self.stubs.Set(volume_api.API, "get_snapshot", stub_snapshot_get)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
"snapshot_id": TEST_SNAPSHOT_UUID}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,

@@ -176,10 +174,10 @@ class VolumeApiTest(test.TestCase):
self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": 1234}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,

@@ -191,10 +189,10 @@ class VolumeApiTest(test.TestCase):
self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": '12345'}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,

@@ -305,7 +303,7 @@ class VolumeApiTest(test.TestCase):
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)

@@ -328,7 +326,7 @@ class VolumeApiTest(test.TestCase):
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)

@@ -410,7 +408,7 @@ class VolumeApiTest(test.TestCase):
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)

@@ -433,7 +431,7 @@ class VolumeApiTest(test.TestCase):
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)

@@ -460,7 +458,7 @@ class VolumeApiTest(test.TestCase):
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
@@ -552,20 +550,16 @@ class VolumeSerializerTest(test.TestCase):
size=1024,
availability_zone='vol_availability',
created_at=datetime.datetime.now(),
-attachments=[dict(
-id='vol_id',
-volume_id='vol_id',
-server_id='instance_uuid',
-device='/foo')],
+attachments=[dict(id='vol_id',
+volume_id='vol_id',
+server_id='instance_uuid',
+device='/foo')],
display_name='vol_name',
display_description='vol_desc',
volume_type='vol_type',
snapshot_id='snap_id',
-metadata=dict(
-foo='bar',
-baz='quux',
-),
-)
+metadata=dict(foo='bar',
+baz='quux', ), )
text = serializer.serialize(dict(volume=raw_volume))

print text

@@ -575,46 +569,36 @@ class VolumeSerializerTest(test.TestCase):

def test_volume_index_detail_serializer(self):
serializer = volumes.VolumesTemplate()
-raw_volumes = [dict(
-id='vol1_id',
-status='vol1_status',
-size=1024,
-availability_zone='vol1_availability',
-created_at=datetime.datetime.now(),
-attachments=[dict(
-id='vol1_id',
-volume_id='vol1_id',
-server_id='instance_uuid',
-device='/foo1')],
-display_name='vol1_name',
-display_description='vol1_desc',
-volume_type='vol1_type',
-snapshot_id='snap1_id',
-metadata=dict(
-foo='vol1_foo',
-bar='vol1_bar',
-),
-),
-dict(
-id='vol2_id',
-status='vol2_status',
-size=1024,
-availability_zone='vol2_availability',
-created_at=datetime.datetime.now(),
-attachments=[dict(
-id='vol2_id',
-volume_id='vol2_id',
-server_id='instance_uuid',
-device='/foo2')],
-display_name='vol2_name',
-display_description='vol2_desc',
-volume_type='vol2_type',
-snapshot_id='snap2_id',
-metadata=dict(
-foo='vol2_foo',
-bar='vol2_bar',
-),
-)]
+raw_volumes = [dict(id='vol1_id',
+status='vol1_status',
+size=1024,
+availability_zone='vol1_availability',
+created_at=datetime.datetime.now(),
+attachments=[dict(id='vol1_id',
+volume_id='vol1_id',
+server_id='instance_uuid',
+device='/foo1')],
+display_name='vol1_name',
+display_description='vol1_desc',
+volume_type='vol1_type',
+snapshot_id='snap1_id',
+metadata=dict(foo='vol1_foo',
+bar='vol1_bar', ), ),
+dict(id='vol2_id',
+status='vol2_status',
+size=1024,
+availability_zone='vol2_availability',
+created_at=datetime.datetime.now(),
+attachments=[dict(id='vol2_id',
+volume_id='vol2_id',
+server_id='instance_uuid',
+device='/foo2')],
+display_name='vol2_name',
+display_description='vol2_desc',
+volume_type='vol2_type',
+snapshot_id='snap2_id',
+metadata=dict(foo='vol2_foo',
+bar='vol2_bar', ), )]
text = serializer.serialize(dict(volumes=raw_volumes))

print text
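The hunks above all move multi-line call arguments onto a continuation style where the first argument stays on the opening line and the rest line up beneath it. A minimal, runnable sketch of that layout, reusing field names from the fixture above (the snippet itself is illustrative and not part of the test suite):

    import datetime

    # Arguments kept on the opening line and continued underneath it.
    raw_volume = dict(size=1024,
                      availability_zone='vol_availability',
                      created_at=datetime.datetime.now(),
                      attachments=[dict(id='vol_id',
                                        volume_id='vol_id',
                                        server_id='instance_uuid',
                                        device='/foo')],
                      display_name='vol_name',
                      metadata=dict(foo='bar',
                                    baz='quux'))

    print(raw_volume['attachments'][0]['device'])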
|
||||||
@@ -637,11 +621,7 @@ class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"></volume>"""
request = self.deserializer.deserialize(self_request)
-expected = {
-"volume": {
-"size": "1",
-},
-}
+expected = {"volume": {"size": "1", }, }
self.assertEquals(request['body'], expected)

def test_display_name(self):

@@ -159,10 +159,8 @@ class LimitsControllerTest(BaseLimitTestSuite):
},

],
-"absolute": {
-"maxTotalVolumeGigabytes": 512,
-"maxTotalVolumes": 5,
-},
+"absolute": {"maxTotalVolumeGigabytes": 512,
+"maxTotalVolumes": 5, },
},
}
body = jsonutils.loads(response.body)

@@ -590,7 +588,6 @@ class WsgiLimiterTest(BaseLimitTestSuite):

def test_invalid_methods(self):
"""Only POSTs should work."""
-requests = []
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
request = webob.Request.blank("/", method=method)
response = request.get_response(self.app)
||||||
@ -776,44 +773,26 @@ class LimitsViewBuilderTest(test.TestCase):
|
|||||||
"injected_file_content_bytes": 5}
|
"injected_file_content_bytes": 5}
|
||||||
|
|
||||||
def test_build_limits(self):
|
def test_build_limits(self):
|
||||||
|
tdate = "2011-07-21T18:17:06Z"
|
||||||
expected_limits = {
|
expected_limits = {
|
||||||
"limits": {
|
"limits": {"rate": [{"uri": "*",
|
||||||
"rate": [
|
"regex": ".*",
|
||||||
{
|
"limit": [{"value": 10,
|
||||||
"uri": "*",
|
"verb": "POST",
|
||||||
"regex": ".*",
|
"remaining": 2,
|
||||||
"limit": [
|
"unit": "MINUTE",
|
||||||
{
|
"next-available": tdate}]},
|
||||||
"value": 10,
|
{"uri": "*/volumes",
|
||||||
"verb": "POST",
|
"regex": "^/volumes",
|
||||||
"remaining": 2,
|
"limit": [{"value": 50,
|
||||||
"unit": "MINUTE",
|
"verb": "POST",
|
||||||
"next-available": "2011-07-21T18:17:06Z"
|
"remaining": 10,
|
||||||
}
|
"unit": "DAY",
|
||||||
]
|
"next-available": tdate}]}],
|
||||||
},
|
"absolute": {"maxServerMeta": 1,
|
||||||
{
|
"maxImageMeta": 1,
|
||||||
"uri": "*/volumes",
|
"maxPersonality": 5,
|
||||||
"regex": "^/volumes",
|
"maxPersonalitySize": 5}}}
|
||||||
"limit": [
|
|
||||||
{
|
|
||||||
"value": 50,
|
|
||||||
"verb": "POST",
|
|
||||||
"remaining": 10,
|
|
||||||
"unit": "DAY",
|
|
||||||
"next-available": "2011-07-21T18:17:06Z"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"absolute": {
|
|
||||||
"maxServerMeta": 1,
|
|
||||||
"maxImageMeta": 1,
|
|
||||||
"maxPersonality": 5,
|
|
||||||
"maxPersonalitySize": 5
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
output = self.view_builder.build(self.rate_limits,
|
output = self.view_builder.build(self.rate_limits,
|
||||||
self.absolute_limits)
|
self.absolute_limits)
|
||||||
@ -842,30 +821,26 @@ class LimitsXMLSerializationTest(test.TestCase):
|
|||||||
self.assertTrue(has_dec)
|
self.assertTrue(has_dec)
|
||||||
|
|
||||||
def test_index(self):
|
def test_index(self):
|
||||||
|
tdate = "2011-12-15T22:42:45Z"
|
||||||
serializer = limits.LimitsTemplate()
|
serializer = limits.LimitsTemplate()
|
||||||
fixture = {
|
fixture = {"limits": {"rate": [{"uri": "*",
|
||||||
"limits": {
|
"regex": ".*",
|
||||||
"rate": [{
|
"limit": [{"value": 10,
|
||||||
"uri": "*",
|
"verb": "POST",
|
||||||
"regex": ".*",
|
"remaining": 2,
|
||||||
"limit": [{
|
"unit": "MINUTE",
|
||||||
"value": 10,
|
"next-available": tdate}]},
|
||||||
"verb": "POST",
|
{"uri": "*/servers",
|
||||||
"remaining": 2,
|
"regex": "^/servers",
|
||||||
"unit": "MINUTE",
|
"limit": [{"value": 50,
|
||||||
"next-available": "2011-12-15T22:42:45Z"}]},
|
"verb": "POST",
|
||||||
{"uri": "*/servers",
|
"remaining": 10,
|
||||||
"regex": "^/servers",
|
"unit": "DAY",
|
||||||
"limit": [{
|
"next-available": tdate}]}],
|
||||||
"value": 50,
|
"absolute": {"maxServerMeta": 1,
|
||||||
"verb": "POST",
|
"maxImageMeta": 1,
|
||||||
"remaining": 10,
|
"maxPersonality": 5,
|
||||||
"unit": "DAY",
|
"maxPersonalitySize": 10240}}}
|
||||||
"next-available": "2011-12-15T22:42:45Z"}]}],
|
|
||||||
"absolute": {"maxServerMeta": 1,
|
|
||||||
"maxImageMeta": 1,
|
|
||||||
"maxPersonality": 5,
|
|
||||||
"maxPersonalitySize": 10240}}}
|
|
||||||
|
|
||||||
output = serializer.serialize(fixture)
|
output = serializer.serialize(fixture)
|
||||||
root = etree.XML(output)
|
root = etree.XML(output)
|
||||||
@@ -891,8 +866,9 @@ class LimitsXMLSerializationTest(test.TestCase):
for j, limit in enumerate(rate_limits):
for key in ['verb', 'value', 'remaining', 'unit',
'next-available']:
-self.assertEqual(limit.get(key),
-str(fixture['limits']['rate'][i]['limit'][j][key]))
+self.assertEqual(
+limit.get(key),
+str(fixture['limits']['rate'][i]['limit'][j][key]))

def test_index_no_limits(self):
serializer = limits.LimitsTemplate()
|
||||||
|
@ -614,33 +614,24 @@ class VolumeSerializerTest(test.TestCase):
|
|||||||
display_description='vol1_desc',
|
display_description='vol1_desc',
|
||||||
volume_type='vol1_type',
|
volume_type='vol1_type',
|
||||||
snapshot_id='snap1_id',
|
snapshot_id='snap1_id',
|
||||||
metadata=dict(
|
metadata=dict(foo='vol1_foo',
|
||||||
foo='vol1_foo',
|
bar='vol1_bar', ), ),
|
||||||
bar='vol1_bar',
|
|
||||||
),
|
|
||||||
),
|
|
||||||
dict(
|
dict(
|
||||||
id='vol2_id',
|
id='vol2_id',
|
||||||
status='vol2_status',
|
status='vol2_status',
|
||||||
size=1024,
|
size=1024,
|
||||||
availability_zone='vol2_availability',
|
availability_zone='vol2_availability',
|
||||||
created_at=datetime.datetime.now(),
|
created_at=datetime.datetime.now(),
|
||||||
attachments=[
|
attachments=[dict(id='vol2_id',
|
||||||
dict(
|
volume_id='vol2_id',
|
||||||
id='vol2_id',
|
server_id='instance_uuid',
|
||||||
volume_id='vol2_id',
|
device='/foo2')],
|
||||||
server_id='instance_uuid',
|
|
||||||
device='/foo2')],
|
|
||||||
display_name='vol2_name',
|
display_name='vol2_name',
|
||||||
display_description='vol2_desc',
|
display_description='vol2_desc',
|
||||||
volume_type='vol2_type',
|
volume_type='vol2_type',
|
||||||
snapshot_id='snap2_id',
|
snapshot_id='snap2_id',
|
||||||
metadata=dict(
|
metadata=dict(foo='vol2_foo',
|
||||||
foo='vol2_foo',
|
bar='vol2_bar', ), )]
|
||||||
bar='vol2_bar',
|
|
||||||
),
|
|
||||||
)
|
|
||||||
]
|
|
||||||
text = serializer.serialize(dict(volumes=raw_volumes))
|
text = serializer.serialize(dict(volumes=raw_volumes))
|
||||||
|
|
||||||
print text
|
print text
|
||||||
|
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.

-"""Stubouts, mocks and fixtures for the test suite"""
+"""Stubouts, mocks and fixtures for the test suite."""

from cinder import db

@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.

-"""Implementation of a fake image service"""
+"""Implementation of a fake image service."""

import copy
import datetime
|
||||||
@ -44,101 +44,101 @@ class _FakeImageService(object):
|
|||||||
timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
|
timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
|
||||||
|
|
||||||
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
|
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
|
||||||
'name': 'fakeimage123456',
|
'name': 'fakeimage123456',
|
||||||
'created_at': timestamp,
|
'created_at': timestamp,
|
||||||
'updated_at': timestamp,
|
'updated_at': timestamp,
|
||||||
'deleted_at': None,
|
'deleted_at': None,
|
||||||
'deleted': False,
|
'deleted': False,
|
||||||
'status': 'active',
|
'status': 'active',
|
||||||
'is_public': False,
|
'is_public': False,
|
||||||
'container_format': 'raw',
|
'container_format': 'raw',
|
||||||
'disk_format': 'raw',
|
'disk_format': 'raw',
|
||||||
'properties': {'kernel_id': 'nokernel',
|
'properties': {'kernel_id': 'nokernel',
|
||||||
'ramdisk_id': 'nokernel',
|
'ramdisk_id': 'nokernel',
|
||||||
'architecture': 'x86_64'}}
|
'architecture': 'x86_64'}}
|
||||||
|
|
||||||
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
|
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
|
||||||
'name': 'fakeimage123456',
|
'name': 'fakeimage123456',
|
||||||
'created_at': timestamp,
|
'created_at': timestamp,
|
||||||
'updated_at': timestamp,
|
'updated_at': timestamp,
|
||||||
'deleted_at': None,
|
'deleted_at': None,
|
||||||
'deleted': False,
|
'deleted': False,
|
||||||
'status': 'active',
|
'status': 'active',
|
||||||
'is_public': True,
|
'is_public': True,
|
||||||
'container_format': 'ami',
|
'container_format': 'ami',
|
||||||
'disk_format': 'ami',
|
'disk_format': 'ami',
|
||||||
'properties': {'kernel_id': 'nokernel',
|
'properties': {'kernel_id': 'nokernel',
|
||||||
'ramdisk_id': 'nokernel'}}
|
'ramdisk_id': 'nokernel'}}
|
||||||
|
|
||||||
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
|
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
|
||||||
'name': 'fakeimage123456',
|
'name': 'fakeimage123456',
|
||||||
'created_at': timestamp,
|
'created_at': timestamp,
|
||||||
'updated_at': timestamp,
|
'updated_at': timestamp,
|
||||||
'deleted_at': None,
|
'deleted_at': None,
|
||||||
'deleted': False,
|
'deleted': False,
|
||||||
'status': 'active',
|
'status': 'active',
|
||||||
'is_public': True,
|
'is_public': True,
|
||||||
'container_format': None,
|
'container_format': None,
|
||||||
'disk_format': None,
|
'disk_format': None,
|
||||||
'properties': {'kernel_id': 'nokernel',
|
'properties': {'kernel_id': 'nokernel',
|
||||||
'ramdisk_id': 'nokernel'}}
|
'ramdisk_id': 'nokernel'}}
|
||||||
|
|
||||||
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
|
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
|
||||||
'name': 'fakeimage123456',
|
'name': 'fakeimage123456',
|
||||||
'created_at': timestamp,
|
'created_at': timestamp,
|
||||||
'updated_at': timestamp,
|
'updated_at': timestamp,
|
||||||
'deleted_at': None,
|
'deleted_at': None,
|
||||||
'deleted': False,
|
'deleted': False,
|
||||||
'status': 'active',
|
'status': 'active',
|
||||||
'is_public': True,
|
'is_public': True,
|
||||||
'container_format': 'ami',
|
'container_format': 'ami',
|
||||||
'disk_format': 'ami',
|
'disk_format': 'ami',
|
||||||
'properties': {'kernel_id': 'nokernel',
|
'properties': {'kernel_id': 'nokernel',
|
||||||
'ramdisk_id': 'nokernel'}}
|
'ramdisk_id': 'nokernel'}}
|
||||||
|
|
||||||
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
|
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
|
||||||
'name': 'fakeimage123456',
|
'name': 'fakeimage123456',
|
||||||
'created_at': timestamp,
|
'created_at': timestamp,
|
||||||
'updated_at': timestamp,
|
'updated_at': timestamp,
|
||||||
'deleted_at': None,
|
'deleted_at': None,
|
||||||
'deleted': False,
|
'deleted': False,
|
||||||
'status': 'active',
|
'status': 'active',
|
||||||
'is_public': True,
|
'is_public': True,
|
||||||
'container_format': 'ami',
|
'container_format': 'ami',
|
||||||
'disk_format': 'ami',
|
'disk_format': 'ami',
|
||||||
'properties': {'kernel_id':
|
'properties': {
|
||||||
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
|
'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
|
||||||
'ramdisk_id': None}}
|
'ramdisk_id': None}}
|
||||||
|
|
||||||
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
|
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
|
||||||
'name': 'fakeimage6',
|
'name': 'fakeimage6',
|
||||||
'created_at': timestamp,
|
'created_at': timestamp,
|
||||||
'updated_at': timestamp,
|
'updated_at': timestamp,
|
||||||
'deleted_at': None,
|
'deleted_at': None,
|
||||||
'deleted': False,
|
'deleted': False,
|
||||||
'status': 'active',
|
'status': 'active',
|
||||||
'is_public': False,
|
'is_public': False,
|
||||||
'container_format': 'ova',
|
'container_format': 'ova',
|
||||||
'disk_format': 'vhd',
|
'disk_format': 'vhd',
|
||||||
'properties': {'kernel_id': 'nokernel',
|
'properties': {'kernel_id': 'nokernel',
|
||||||
'ramdisk_id': 'nokernel',
|
'ramdisk_id': 'nokernel',
|
||||||
'architecture': 'x86_64',
|
'architecture': 'x86_64',
|
||||||
'auto_disk_config': 'False'}}
|
'auto_disk_config': 'False'}}
|
||||||
|
|
||||||
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
|
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
|
||||||
'name': 'fakeimage7',
|
'name': 'fakeimage7',
|
||||||
'created_at': timestamp,
|
'created_at': timestamp,
|
||||||
'updated_at': timestamp,
|
'updated_at': timestamp,
|
||||||
'deleted_at': None,
|
'deleted_at': None,
|
||||||
'deleted': False,
|
'deleted': False,
|
||||||
'status': 'active',
|
'status': 'active',
|
||||||
'is_public': False,
|
'is_public': False,
|
||||||
'container_format': 'ova',
|
'container_format': 'ova',
|
||||||
'disk_format': 'vhd',
|
'disk_format': 'vhd',
|
||||||
'properties': {'kernel_id': 'nokernel',
|
'properties': {'kernel_id': 'nokernel',
|
||||||
'ramdisk_id': 'nokernel',
|
'ramdisk_id': 'nokernel',
|
||||||
'architecture': 'x86_64',
|
'architecture': 'x86_64',
|
||||||
'auto_disk_config': 'True'}}
|
'auto_disk_config': 'True'}}
|
||||||
|
|
||||||
self.create(None, image1)
|
self.create(None, image1)
|
||||||
self.create(None, image2)
|
self.create(None, image2)
|
||||||
|
@@ -31,7 +31,7 @@ from cinder.tests.glance import stubs as glance_stubs


class NullWriter(object):
-"""Used to test ImageService.get which takes a writer object"""
+"""Used to test ImageService.get which takes a writer object."""

def write(self, *arg, **kwargs):
pass

@@ -109,11 +109,11 @@ class TestGlanceImageService(test.TestCase):
def _fake_create_glance_client(context, host, port, version):
return client

-self.stubs.Set(glance, '_create_glance_client',
-_fake_create_glance_client)
+self.stubs.Set(glance,
+'_create_glance_client',
+_fake_create_glance_client)

-client_wrapper = glance.GlanceClientWrapper(
-'fake', 'fake_host', 9292)
+client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292)
return glance.GlanceImageService(client=client_wrapper)

@staticmethod
|
||||||
@ -131,7 +131,7 @@ class TestGlanceImageService(test.TestCase):
|
|||||||
deleted_at=self.NOW_GLANCE_FORMAT)
|
deleted_at=self.NOW_GLANCE_FORMAT)
|
||||||
|
|
||||||
def test_create_with_instance_id(self):
|
def test_create_with_instance_id(self):
|
||||||
"""Ensure instance_id is persisted as an image-property"""
|
"""Ensure instance_id is persisted as an image-property."""
|
||||||
fixture = {'name': 'test image',
|
fixture = {'name': 'test image',
|
||||||
'is_public': False,
|
'is_public': False,
|
||||||
'properties': {'instance_id': '42', 'user_id': 'fake'}}
|
'properties': {'instance_id': '42', 'user_id': 'fake'}}
|
||||||
@@ -458,7 +458,10 @@ class TestGlanceImageService(test.TestCase):
# When retries are disabled, we should get an exception
self.flags(glance_num_retries=0)
self.assertRaises(exception.GlanceConnectionFailed,
-service.download, self.context, image_id, writer)
+service.download,
+self.context,
+image_id,
+writer)

# Now lets enable retries. No exception should happen now.
tries = [0]
@@ -520,19 +523,19 @@ class TestGlanceImageService(test.TestCase):
def test_glance_client_image_id(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
-(service, same_id) = glance.get_remote_image_service(
-self.context, image_id)
+(service, same_id) = glance.get_remote_image_service(self.context,
+image_id)
self.assertEquals(same_id, image_id)

def test_glance_client_image_ref(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
image_url = 'http://something-less-likely/%s' % image_id
-(service, same_id) = glance.get_remote_image_service(
-self.context, image_url)
+(service, same_id) = glance.get_remote_image_service(self.context,
+image_url)
self.assertEquals(same_id, image_id)
self.assertEquals(service._client.host,
'something-less-likely')


def _create_failing_glance_client(info):
|
||||||
|
@ -53,7 +53,7 @@ class OpenStackApiAuthorizationException(OpenStackApiException):
|
|||||||
if not message:
|
if not message:
|
||||||
message = _("Authorization error")
|
message = _("Authorization error")
|
||||||
super(OpenStackApiAuthorizationException, self).__init__(message,
|
super(OpenStackApiAuthorizationException, self).__init__(message,
|
||||||
response)
|
response)
|
||||||
|
|
||||||
|
|
||||||
class OpenStackApiNotFoundException(OpenStackApiException):
|
class OpenStackApiNotFoundException(OpenStackApiException):
|
||||||
@ -157,8 +157,8 @@ class TestOpenStackClient(object):
|
|||||||
raise OpenStackApiAuthorizationException(response=response)
|
raise OpenStackApiAuthorizationException(response=response)
|
||||||
else:
|
else:
|
||||||
raise OpenStackApiException(
|
raise OpenStackApiException(
|
||||||
message=_("Unexpected status code"),
|
message=_("Unexpected status code"),
|
||||||
response=response)
|
response=response)
|
||||||
|
|
||||||
return response
|
return response
|
||||||
|
|
||||||
|
@@ -21,7 +21,7 @@ CALLED_FUNCTION = []


def example_decorator(name, function):
-""" decorator for notify which is used from utils.monkey_patch()
+"""decorator for notify which is used from utils.monkey_patch().

:param name: name of the function
:param function: - object of the function

@@ -64,13 +64,18 @@ class SchedulerRpcAPITestCase(test.TestCase):

def test_update_service_capabilities(self):
self._test_scheduler_api('update_service_capabilities',
-rpc_method='fanout_cast', service_name='fake_name',
-host='fake_host', capabilities='fake_capabilities')
+rpc_method='fanout_cast',
+service_name='fake_name',
+host='fake_host',
+capabilities='fake_capabilities')

def test_create_volume(self):
self._test_scheduler_api('create_volume',
-rpc_method='cast', topic='topic', volume_id='volume_id',
-snapshot_id='snapshot_id', image_id='image_id',
-request_spec='fake_request_spec',
-filter_properties='filter_properties',
-version='1.2')
+rpc_method='cast',
+topic='topic',
+volume_id='volume_id',
+snapshot_id='snapshot_id',
+image_id='image_id',
+request_spec='fake_request_spec',
+filter_properties='filter_properties',
+version='1.2')
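The new side of this hunk puts each keyword argument of the helper call on its own line. A small, self-contained sketch of the same layout; _test_scheduler_api below is a stand-in that only echoes its arguments, not the real test helper:

    def _test_scheduler_api(method, **kwargs):
        # Stand-in: just echo what we were called with.
        return method, kwargs

    result = _test_scheduler_api('create_volume',
                                 rpc_method='cast',
                                 topic='topic',
                                 volume_id='volume_id',
                                 snapshot_id='snapshot_id',
                                 image_id='image_id',
                                 request_spec='fake_request_spec',
                                 filter_properties='filter_properties',
                                 version='1.2')
    print(result)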
|
||||||
|
@@ -35,7 +35,7 @@ FLAGS = flags.FLAGS


class SchedulerManagerTestCase(test.TestCase):
-"""Test case for scheduler manager"""
+"""Test case for scheduler manager."""

manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler

@@ -63,29 +63,34 @@ class SchedulerManagerTestCase(test.TestCase):
host = 'fake_host'

self.mox.StubOutWithMock(self.manager.driver,
'update_service_capabilities')

# Test no capabilities passes empty dictionary
self.manager.driver.update_service_capabilities(service_name,
host, {})
self.mox.ReplayAll()
-result = self.manager.update_service_capabilities(self.context,
-service_name=service_name, host=host)
+result = self.manager.update_service_capabilities(
+self.context,
+service_name=service_name,
+host=host)
self.mox.VerifyAll()

self.mox.ResetAll()
# Test capabilities passes correctly
capabilities = {'fake_capability': 'fake_value'}
-self.manager.driver.update_service_capabilities(
-service_name, host, capabilities)
+self.manager.driver.update_service_capabilities(service_name,
+host,
+capabilities)
self.mox.ReplayAll()
-result = self.manager.update_service_capabilities(self.context,
-service_name=service_name, host=host,
-capabilities=capabilities)
+result = self.manager.update_service_capabilities(
+self.context,
+service_name=service_name, host=host,
+capabilities=capabilities)

def test_create_volume_exception_puts_volume_in_error_state(self):
-""" Test that a NoValideHost exception for create_volume puts
-the volume in 'error' state and eats the exception.
+"""Test that a NoValideHost exception for create_volume.
+
+Puts the volume in 'error' state and eats the exception.
"""
fake_volume_id = 1
self._mox_schedule_method_helper('schedule_create_volume')
|
||||||
@ -95,7 +100,8 @@ class SchedulerManagerTestCase(test.TestCase):
|
|||||||
volume_id = fake_volume_id
|
volume_id = fake_volume_id
|
||||||
request_spec = {'volume_id': fake_volume_id}
|
request_spec = {'volume_id': fake_volume_id}
|
||||||
|
|
||||||
self.manager.driver.schedule_create_volume(self.context,
|
self.manager.driver.schedule_create_volume(
|
||||||
|
self.context,
|
||||||
request_spec, {}).AndRaise(exception.NoValidHost(reason=""))
|
request_spec, {}).AndRaise(exception.NoValidHost(reason=""))
|
||||||
db.volume_update(self.context, fake_volume_id, {'status': 'error'})
|
db.volume_update(self.context, fake_volume_id, {'status': 'error'})
|
||||||
|
|
||||||
@ -112,11 +118,11 @@ class SchedulerManagerTestCase(test.TestCase):
|
|||||||
setattr(self.manager.driver, method_name, stub_method)
|
setattr(self.manager.driver, method_name, stub_method)
|
||||||
|
|
||||||
self.mox.StubOutWithMock(self.manager.driver,
|
self.mox.StubOutWithMock(self.manager.driver,
|
||||||
method_name)
|
method_name)
|
||||||
|
|
||||||
|
|
||||||
class SchedulerTestCase(test.TestCase):
|
class SchedulerTestCase(test.TestCase):
|
||||||
"""Test case for base scheduler driver class"""
|
"""Test case for base scheduler driver class."""
|
||||||
|
|
||||||
# So we can subclass this test and re-use tests if we need.
|
# So we can subclass this test and re-use tests if we need.
|
||||||
driver_cls = driver.Scheduler
|
driver_cls = driver.Scheduler
|
||||||
@ -132,14 +138,16 @@ class SchedulerTestCase(test.TestCase):
|
|||||||
host = 'fake_host'
|
host = 'fake_host'
|
||||||
|
|
||||||
self.mox.StubOutWithMock(self.driver.host_manager,
|
self.mox.StubOutWithMock(self.driver.host_manager,
|
||||||
'update_service_capabilities')
|
'update_service_capabilities')
|
||||||
|
|
||||||
capabilities = {'fake_capability': 'fake_value'}
|
capabilities = {'fake_capability': 'fake_value'}
|
||||||
self.driver.host_manager.update_service_capabilities(
|
self.driver.host_manager.update_service_capabilities(service_name,
|
||||||
service_name, host, capabilities)
|
host,
|
||||||
|
capabilities)
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
result = self.driver.update_service_capabilities(service_name,
|
result = self.driver.update_service_capabilities(service_name,
|
||||||
host, capabilities)
|
host,
|
||||||
|
capabilities)
|
||||||
|
|
||||||
def test_hosts_up(self):
|
def test_hosts_up(self):
|
||||||
service1 = {'host': 'host1'}
|
service1 = {'host': 'host1'}
|
||||||
@ -150,7 +158,7 @@ class SchedulerTestCase(test.TestCase):
|
|||||||
self.mox.StubOutWithMock(utils, 'service_is_up')
|
self.mox.StubOutWithMock(utils, 'service_is_up')
|
||||||
|
|
||||||
db.service_get_all_by_topic(self.context,
|
db.service_get_all_by_topic(self.context,
|
||||||
self.topic).AndReturn(services)
|
self.topic).AndReturn(services)
|
||||||
utils.service_is_up(service1).AndReturn(False)
|
utils.service_is_up(service1).AndReturn(False)
|
||||||
utils.service_is_up(service2).AndReturn(True)
|
utils.service_is_up(service2).AndReturn(True)
|
||||||
|
|
||||||
@ -168,12 +176,12 @@ class SchedulerDriverBaseTestCase(SchedulerTestCase):
|
|||||||
fake_kwargs = {'cat': 'meow'}
|
fake_kwargs = {'cat': 'meow'}
|
||||||
|
|
||||||
self.assertRaises(NotImplementedError, self.driver.schedule,
|
self.assertRaises(NotImplementedError, self.driver.schedule,
|
||||||
self.context, self.topic, 'schedule_something',
|
self.context, self.topic, 'schedule_something',
|
||||||
*fake_args, **fake_kwargs)
|
*fake_args, **fake_kwargs)
|
||||||
|
|
||||||
|
|
||||||
class SchedulerDriverModuleTestCase(test.TestCase):
|
class SchedulerDriverModuleTestCase(test.TestCase):
|
||||||
"""Test case for scheduler driver module methods"""
|
"""Test case for scheduler driver module methods."""
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(SchedulerDriverModuleTestCase, self).setUp()
|
super(SchedulerDriverModuleTestCase, self).setUp()
|
||||||
@ -185,7 +193,8 @@ class SchedulerDriverModuleTestCase(test.TestCase):
|
|||||||
|
|
||||||
timeutils.utcnow().AndReturn('fake-now')
|
timeutils.utcnow().AndReturn('fake-now')
|
||||||
db.volume_update(self.context, 31337,
|
db.volume_update(self.context, 31337,
|
||||||
{'host': 'fake_host', 'scheduled_at': 'fake-now'})
|
{'host': 'fake_host',
|
||||||
|
'scheduled_at': 'fake-now'})
|
||||||
|
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
driver.volume_update_db(self.context, 31337, 'fake_host')
|
driver.volume_update_db(self.context, 31337, 'fake_host')
|
||||||
|
@@ -32,11 +32,12 @@ class HpSanISCSITestCase(test.TestCase):
self.connector = {'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'host': 'fakehost'}
-self.properties = {'target_discoverd': True,
-'target_portal': '10.0.1.6:3260',
-'target_iqn':
-'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
-'volume_id': 1}
+self.properties = {
+'target_discoverd': True,
+'target_portal': '10.0.1.6:3260',
+'target_iqn':
+'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
+'volume_id': 1}

def tearDown(self):
super(HpSanISCSITestCase, self).tearDown()
|
||||||
|
@ -16,7 +16,7 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
"""Unit tests for the API endpoint"""
|
"""Unit tests for the API endpoint."""
|
||||||
|
|
||||||
import httplib
|
import httplib
|
||||||
import StringIO
|
import StringIO
|
||||||
@ -25,18 +25,18 @@ import webob
|
|||||||
|
|
||||||
|
|
||||||
class FakeHttplibSocket(object):
|
class FakeHttplibSocket(object):
|
||||||
"""a fake socket implementation for httplib.HTTPResponse, trivial"""
|
"""A fake socket implementation for httplib.HTTPResponse, trivial."""
|
||||||
def __init__(self, response_string):
|
def __init__(self, response_string):
|
||||||
self.response_string = response_string
|
self.response_string = response_string
|
||||||
self._buffer = StringIO.StringIO(response_string)
|
self._buffer = StringIO.StringIO(response_string)
|
||||||
|
|
||||||
def makefile(self, _mode, _other):
|
def makefile(self, _mode, _other):
|
||||||
"""Returns the socket's internal buffer"""
|
"""Returns the socket's internal buffer."""
|
||||||
return self._buffer
|
return self._buffer
|
||||||
|
|
||||||
|
|
||||||
class FakeHttplibConnection(object):
|
class FakeHttplibConnection(object):
|
||||||
"""A fake httplib.HTTPConnection for boto to use
|
"""A fake httplib.HTTPConnection for boto.
|
||||||
|
|
||||||
requests made via this connection actually get translated and routed into
|
requests made via this connection actually get translated and routed into
|
||||||
our WSGI app, we then wait for the response and turn it back into
|
our WSGI app, we then wait for the response and turn it back into
|
||||||
@ -71,5 +71,5 @@ class FakeHttplibConnection(object):
|
|||||||
return self.sock.response_string
|
return self.sock.response_string
|
||||||
|
|
||||||
def close(self):
|
def close(self):
|
||||||
"""Required for compatibility with boto/tornado"""
|
"""Required for compatibility with boto/tornado."""
|
||||||
pass
|
pass
|
||||||
|
@@ -31,15 +31,14 @@ class RootwrapTestCase(test.TestCase):
filters.CommandFilter("/usr/bin/foo_bar_not_exist", "root"),
filters.RegExpFilter("/bin/cat", "root", 'cat', '/[a-z]+'),
filters.CommandFilter("/nonexistent/cat", "root"),
-filters.CommandFilter("/bin/cat", "root") # Keep this one last
-]
+filters.CommandFilter("/bin/cat", "root")] # Keep this one last

def test_RegExpFilter_match(self):
usercmd = ["ls", "/root"]
filtermatch = wrapper.match_filter(self.filters, usercmd)
self.assertFalse(filtermatch is None)
self.assertEqual(filtermatch.get_command(usercmd),
["/bin/ls", "/root"])

def test_RegExpFilter_reject(self):
usercmd = ["ls", "root"]
|
||||||
@ -92,7 +91,7 @@ class RootwrapTestCase(test.TestCase):
|
|||||||
self.assertTrue(f.match(usercmd) or f2.match(usercmd))
|
self.assertTrue(f.match(usercmd) or f2.match(usercmd))
|
||||||
|
|
||||||
def test_KillFilter_no_raise(self):
|
def test_KillFilter_no_raise(self):
|
||||||
"""Makes sure ValueError from bug 926412 is gone"""
|
"""Makes sure ValueError from bug 926412 is gone."""
|
||||||
f = filters.KillFilter("root", "")
|
f = filters.KillFilter("root", "")
|
||||||
# Providing anything other than kill should be False
|
# Providing anything other than kill should be False
|
||||||
usercmd = ['notkill', 999999]
|
usercmd = ['notkill', 999999]
|
||||||
@ -102,7 +101,7 @@ class RootwrapTestCase(test.TestCase):
|
|||||||
self.assertFalse(f.match(usercmd))
|
self.assertFalse(f.match(usercmd))
|
||||||
|
|
||||||
def test_KillFilter_deleted_exe(self):
|
def test_KillFilter_deleted_exe(self):
|
||||||
"""Makes sure deleted exe's are killed correctly"""
|
"""Makes sure deleted exe's are killed correctly."""
|
||||||
# See bug #967931.
|
# See bug #967931.
|
||||||
def fake_readlink(blah):
|
def fake_readlink(blah):
|
||||||
return '/bin/commandddddd (deleted)'
|
return '/bin/commandddddd (deleted)'
|
||||||
|
@@ -63,8 +63,10 @@ class ContextTestCase(test.TestCase):

self.stubs.Set(context.LOG, 'warn', fake_warn)

-c = context.RequestContext('user', 'project',
-extra_arg1='meow', extra_arg2='wuff')
+c = context.RequestContext('user',
+'project',
+extra_arg1='meow',
+extra_arg2='wuff')
self.assertTrue(c)
self.assertIn("'extra_arg1': 'meow'", info['log_msg'])
self.assertIn("'extra_arg2': 'wuff'", info['log_msg'])
|
||||||
|
@ -62,8 +62,8 @@ class FlagsTestCase(test.TestCase):
|
|||||||
def test_long_vs_short_flags(self):
|
def test_long_vs_short_flags(self):
|
||||||
FLAGS.clear()
|
FLAGS.clear()
|
||||||
FLAGS.register_cli_opt(cfg.StrOpt('duplicate_answer_long',
|
FLAGS.register_cli_opt(cfg.StrOpt('duplicate_answer_long',
|
||||||
default='val',
|
default='val',
|
||||||
help='desc'))
|
help='desc'))
|
||||||
argv = ['flags_test', '--duplicate_answer=60', 'extra_arg']
|
argv = ['flags_test', '--duplicate_answer=60', 'extra_arg']
|
||||||
args = flags.parse_args(argv, default_config_files=[])
|
args = flags.parse_args(argv, default_config_files=[])
|
||||||
|
|
||||||
@ -72,8 +72,8 @@ class FlagsTestCase(test.TestCase):
|
|||||||
|
|
||||||
FLAGS.clear()
|
FLAGS.clear()
|
||||||
FLAGS.register_cli_opt(cfg.IntOpt('duplicate_answer',
|
FLAGS.register_cli_opt(cfg.IntOpt('duplicate_answer',
|
||||||
default=60,
|
default=60,
|
||||||
help='desc'))
|
help='desc'))
|
||||||
args = flags.parse_args(argv, default_config_files=[])
|
args = flags.parse_args(argv, default_config_files=[])
|
||||||
self.assertEqual(FLAGS.duplicate_answer, 60)
|
self.assertEqual(FLAGS.duplicate_answer, 60)
|
||||||
self.assertEqual(FLAGS.duplicate_answer_long, 'val')
|
self.assertEqual(FLAGS.duplicate_answer_long, 'val')
|
||||||
|
@ -76,7 +76,7 @@ class TargetAdminTestCase(object):
|
|||||||
tgtadm = iscsi.get_target_admin()
|
tgtadm = iscsi.get_target_admin()
|
||||||
tgtadm.set_execute(self.fake_execute)
|
tgtadm.set_execute(self.fake_execute)
|
||||||
tgtadm.create_iscsi_target(self.target_name, self.tid,
|
tgtadm.create_iscsi_target(self.target_name, self.tid,
|
||||||
self.lun, self.path)
|
self.lun, self.path)
|
||||||
tgtadm.show_target(self.tid, iqn=self.target_name)
|
tgtadm.show_target(self.tid, iqn=self.target_name)
|
||||||
tgtadm.remove_iscsi_target(self.tid, self.lun, self.vol_id)
|
tgtadm.remove_iscsi_target(self.tid, self.lun, self.vol_id)
|
||||||
|
|
||||||
@ -95,8 +95,8 @@ class TgtAdmTestCase(test.TestCase, TargetAdminTestCase):
|
|||||||
self.flags(iscsi_helper='tgtadm')
|
self.flags(iscsi_helper='tgtadm')
|
||||||
self.flags(volumes_dir=self.persist_tempdir)
|
self.flags(volumes_dir=self.persist_tempdir)
|
||||||
self.script_template = "\n".join([
|
self.script_template = "\n".join([
|
||||||
'tgt-admin --update iqn.2011-09.org.foo.bar:blaa',
|
'tgt-admin --update iqn.2011-09.org.foo.bar:blaa',
|
||||||
'tgt-admin --delete iqn.2010-10.org.openstack:volume-blaa'])
|
'tgt-admin --delete iqn.2010-10.org.openstack:volume-blaa'])
|
||||||
|
|
||||||
def tearDown(self):
|
def tearDown(self):
|
||||||
try:
|
try:
|
||||||
@ -113,9 +113,9 @@ class IetAdmTestCase(test.TestCase, TargetAdminTestCase):
|
|||||||
TargetAdminTestCase.setUp(self)
|
TargetAdminTestCase.setUp(self)
|
||||||
self.flags(iscsi_helper='ietadm')
|
self.flags(iscsi_helper='ietadm')
|
||||||
self.script_template = "\n".join([
|
self.script_template = "\n".join([
|
||||||
'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s',
|
'ietadm --op new --tid=%(tid)s --params Name=%(target_name)s',
|
||||||
'ietadm --op new --tid=%(tid)s --lun=%(lun)s '
|
'ietadm --op new --tid=%(tid)s --lun=%(lun)s '
|
||||||
'--params Path=%(path)s,Type=fileio',
|
'--params Path=%(path)s,Type=fileio',
|
||||||
'ietadm --op show --tid=%(tid)s',
|
'ietadm --op show --tid=%(tid)s',
|
||||||
'ietadm --op delete --tid=%(tid)s --lun=%(lun)s',
|
'ietadm --op delete --tid=%(tid)s --lun=%(lun)s',
|
||||||
'ietadm --op delete --tid=%(tid)s'])
|
'ietadm --op delete --tid=%(tid)s'])
|
||||||
|
@ -76,7 +76,7 @@ def _have_mysql():
|
|||||||
|
|
||||||
|
|
||||||
class TestMigrations(test.TestCase):
|
class TestMigrations(test.TestCase):
|
||||||
"""Test sqlalchemy-migrate migrations"""
|
"""Test sqlalchemy-migrate migrations."""
|
||||||
|
|
||||||
TEST_DATABASES = {}
|
TEST_DATABASES = {}
|
||||||
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
|
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
|
||||||
@ -87,7 +87,7 @@ class TestMigrations(test.TestCase):
|
|||||||
DEFAULT_CONFIG_FILE)
|
DEFAULT_CONFIG_FILE)
|
||||||
MIGRATE_FILE = cinder.db.sqlalchemy.migrate_repo.__file__
|
MIGRATE_FILE = cinder.db.sqlalchemy.migrate_repo.__file__
|
||||||
REPOSITORY = repository.Repository(
|
REPOSITORY = repository.Repository(
|
||||||
os.path.abspath(os.path.dirname(MIGRATE_FILE)))
|
os.path.abspath(os.path.dirname(MIGRATE_FILE)))
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(TestMigrations, self).setUp()
|
super(TestMigrations, self).setUp()
|
||||||
@@ -256,11 +256,12 @@ class TestMigrations(test.TestCase):
# upgrades successfully.

# Place the database under version control
-migration_api.version_control(engine, TestMigrations.REPOSITORY,
-migration.INIT_VERSION)
+migration_api.version_control(engine,
+TestMigrations.REPOSITORY,
+migration.INIT_VERSION)
self.assertEqual(migration.INIT_VERSION,
migration_api.db_version(engine,
TestMigrations.REPOSITORY))

migration_api.upgrade(engine, TestMigrations.REPOSITORY,
migration.INIT_VERSION + 1)
|
||||||
@ -268,7 +269,7 @@ class TestMigrations(test.TestCase):
|
|||||||
LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)
|
LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)
|
||||||
|
|
||||||
for version in xrange(migration.INIT_VERSION + 2,
|
for version in xrange(migration.INIT_VERSION + 2,
|
||||||
TestMigrations.REPOSITORY.latest + 1):
|
TestMigrations.REPOSITORY.latest + 1):
|
||||||
# upgrade -> downgrade -> upgrade
|
# upgrade -> downgrade -> upgrade
|
||||||
self._migrate_up(engine, version)
|
self._migrate_up(engine, version)
|
||||||
if snake_walk:
|
if snake_walk:
|
||||||
@ -300,5 +301,5 @@ class TestMigrations(test.TestCase):
|
|||||||
TestMigrations.REPOSITORY,
|
TestMigrations.REPOSITORY,
|
||||||
version)
|
version)
|
||||||
self.assertEqual(version,
|
self.assertEqual(version,
|
||||||
migration_api.db_version(engine,
|
migration_api.db_version(engine,
|
||||||
TestMigrations.REPOSITORY))
|
TestMigrations.REPOSITORY))
|
||||||
|
@ -578,21 +578,21 @@ RESPONSE_PREFIX = """<?xml version="1.0" encoding="UTF-8"?>
|
|||||||
RESPONSE_SUFFIX = """</env:Body></env:Envelope>"""
|
RESPONSE_SUFFIX = """</env:Body></env:Envelope>"""
|
||||||
|
|
||||||
APIS = ['ApiProxy', 'DatasetListInfoIterStart', 'DatasetListInfoIterNext',
|
APIS = ['ApiProxy', 'DatasetListInfoIterStart', 'DatasetListInfoIterNext',
|
||||||
'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit',
|
'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit',
|
||||||
'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout',
|
'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout',
|
||||||
'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext',
|
'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext',
|
||||||
'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart',
|
'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart',
|
||||||
'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd',
|
'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd',
|
||||||
'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd',
|
'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd',
|
||||||
'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd',
|
'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd',
|
||||||
'StorageServiceDatasetProvision']
|
'StorageServiceDatasetProvision']
|
||||||
|
|
||||||
iter_count = 0
|
iter_count = 0
|
||||||
iter_table = {}
|
iter_table = {}
|
||||||
|
|
||||||
|
|
||||||
class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
|
class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
|
||||||
"""HTTP handler that fakes enough stuff to allow the driver to run"""
|
"""HTTP handler that fakes enough stuff to allow the driver to run."""
|
||||||
|
|
||||||
def do_GET(s):
|
def do_GET(s):
|
||||||
"""Respond to a GET request."""
|
"""Respond to a GET request."""
|
||||||
@ -622,7 +622,7 @@ class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
|
|||||||
out.write('</portType>')
|
out.write('</portType>')
|
||||||
out.write('<binding name="DfmBinding" type="na:DfmInterface">')
|
out.write('<binding name="DfmBinding" type="na:DfmInterface">')
|
||||||
out.write('<soap:binding style="document" ' +
|
out.write('<soap:binding style="document" ' +
|
||||||
'transport="http://schemas.xmlsoap.org/soap/http"/>')
|
'transport="http://schemas.xmlsoap.org/soap/http"/>')
|
||||||
for api in APIS:
|
for api in APIS:
|
||||||
out.write('<operation name="%s">' % api)
|
out.write('<operation name="%s">' % api)
|
||||||
out.write('<soap:operation soapAction="urn:%s"/>' % api)
|
out.write('<soap:operation soapAction="urn:%s"/>' % api)
|
||||||
@ -641,7 +641,7 @@ class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
|
|||||||
request_xml = s.rfile.read(int(s.headers['Content-Length']))
|
request_xml = s.rfile.read(int(s.headers['Content-Length']))
|
||||||
ntap_ns = 'http://www.netapp.com/management/v1'
|
ntap_ns = 'http://www.netapp.com/management/v1'
|
||||||
nsmap = {'env': 'http://schemas.xmlsoap.org/soap/envelope/',
|
nsmap = {'env': 'http://schemas.xmlsoap.org/soap/envelope/',
|
||||||
'na': ntap_ns}
|
'na': ntap_ns}
|
||||||
root = etree.fromstring(request_xml)
|
root = etree.fromstring(request_xml)
|
||||||
|
|
||||||
body = root.xpath('/env:Envelope/env:Body', namespaces=nsmap)[0]
|
body = root.xpath('/env:Envelope/env:Body', namespaces=nsmap)[0]
|
||||||
@ -977,7 +977,7 @@ class NetAppDriverTestCase(test.TestCase):
|
|||||||
self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID,
|
self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID,
|
||||||
self.VOLUME_TYPE, self.VOLUME_SIZE)
|
self.VOLUME_TYPE, self.VOLUME_SIZE)
|
||||||
volume = {'name': self.VOLUME_NAME, 'project_id': self.PROJECT_ID,
|
volume = {'name': self.VOLUME_NAME, 'project_id': self.PROJECT_ID,
|
||||||
'id': 0, 'provider_auth': None}
|
'id': 0, 'provider_auth': None}
|
||||||
updates = self.driver._get_export(volume)
|
updates = self.driver._get_export(volume)
|
||||||
self.assertTrue(updates['provider_location'])
|
self.assertTrue(updates['provider_location'])
|
||||||
volume['provider_location'] = updates['provider_location']
|
volume['provider_location'] = updates['provider_location']
|
||||||
@ -1193,7 +1193,7 @@ class FakeCMODEServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
|
|||||||
out.write('<binding name="CloudStorageBinding" '
|
out.write('<binding name="CloudStorageBinding" '
|
||||||
'type="na:CloudStorage">')
|
'type="na:CloudStorage">')
|
||||||
out.write('<soap:binding style="document" ' +
|
out.write('<soap:binding style="document" ' +
|
||||||
'transport="http://schemas.xmlsoap.org/soap/http"/>')
|
'transport="http://schemas.xmlsoap.org/soap/http"/>')
|
||||||
for api in CMODE_APIS:
|
for api in CMODE_APIS:
|
||||||
out.write('<operation name="%s">' % api)
|
out.write('<operation name="%s">' % api)
|
||||||
out.write('<soap:operation soapAction=""/>')
|
out.write('<soap:operation soapAction=""/>')
|
||||||
@ -1212,7 +1212,7 @@ class FakeCMODEServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
|
|||||||
request_xml = s.rfile.read(int(s.headers['Content-Length']))
|
request_xml = s.rfile.read(int(s.headers['Content-Length']))
|
||||||
ntap_ns = 'http://cloud.netapp.com/'
|
ntap_ns = 'http://cloud.netapp.com/'
|
||||||
nsmap = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
|
nsmap = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
|
||||||
'na': ntap_ns}
|
'na': ntap_ns}
|
||||||
root = etree.fromstring(request_xml)
|
root = etree.fromstring(request_xml)
|
||||||
|
|
||||||
body = root.xpath('/soapenv:Envelope/soapenv:Body',
|
body = root.xpath('/soapenv:Envelope/soapenv:Body',
|
||||||
@ -1322,24 +1322,18 @@ class FakeCmodeHTTPConnection(object):
|
|||||||
|
|
||||||
class NetAppCmodeISCSIDriverTestCase(test.TestCase):
|
class NetAppCmodeISCSIDriverTestCase(test.TestCase):
|
||||||
"""Test case for NetAppISCSIDriver"""
|
"""Test case for NetAppISCSIDriver"""
|
||||||
volume = {
|
volume = {'name': 'lun1', 'size': 1, 'volume_name': 'lun1',
|
||||||
'name': 'lun1', 'size': 1, 'volume_name': 'lun1',
|
'os_type': 'linux', 'provider_location': 'lun1',
|
||||||
'os_type': 'linux', 'provider_location': 'lun1',
|
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
|
||||||
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
|
'display_name': None, 'display_description': 'lun1',
|
||||||
'display_name': None, 'display_description': 'lun1',
|
'volume_type_id': None}
|
||||||
'volume_type_id': None
|
snapshot = {'name': 'lun2', 'size': 1, 'volume_name': 'lun1',
|
||||||
}
|
'volume_size': 1, 'project_id': 'project'}
|
||||||
snapshot = {
|
volume_sec = {'name': 'vol_snapshot', 'size': 1, 'volume_name': 'lun1',
|
||||||
'name': 'lun2', 'size': 1, 'volume_name': 'lun1',
|
'os_type': 'linux', 'provider_location': 'lun1',
|
||||||
'volume_size': 1, 'project_id': 'project'
|
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
|
||||||
}
|
'display_name': None, 'display_description': 'lun1',
|
||||||
volume_sec = {
|
'volume_type_id': None}
|
||||||
'name': 'vol_snapshot', 'size': 1, 'volume_name': 'lun1',
|
|
||||||
'os_type': 'linux', 'provider_location': 'lun1',
|
|
||||||
'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
|
|
||||||
'display_name': None, 'display_description': 'lun1',
|
|
||||||
'volume_type_id': None
|
|
||||||
}
|
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(NetAppCmodeISCSIDriverTestCase, self).setUp()
|
super(NetAppCmodeISCSIDriverTestCase, self).setUp()
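The reflowed fixtures above collapse each class-level dict onto an aligned literal. A sketch of that layout for illustration only, with the values copied from the volume fixture shown above:

    volume = {'name': 'lun1', 'size': 1, 'volume_name': 'lun1',
              'os_type': 'linux', 'provider_location': 'lun1',
              'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
              'display_name': None, 'display_description': 'lun1',
              'volume_type_id': None}
    print(volume['name'], volume['size'])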
|
||||||
@ -1371,7 +1365,7 @@ class NetAppCmodeISCSIDriverTestCase(test.TestCase):
|
|||||||
self.volume['provider_location'] = updates['provider_location']
|
self.volume['provider_location'] = updates['provider_location']
|
||||||
connector = {'initiator': 'init1'}
|
connector = {'initiator': 'init1'}
|
||||||
connection_info = self.driver.initialize_connection(self.volume,
|
connection_info = self.driver.initialize_connection(self.volume,
|
||||||
connector)
|
connector)
|
||||||
self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
|
self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
|
||||||
properties = connection_info['data']
|
properties = connection_info['data']
|
||||||
self.driver.terminate_connection(self.volume, connector)
|
self.driver.terminate_connection(self.volume, connector)
|
||||||
|
@ -14,7 +14,7 @@
|
|||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs)"""
|
"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs)."""
|
||||||
|
|
||||||
from cinder import context
|
from cinder import context
|
||||||
from cinder import exception
|
from cinder import exception
|
||||||
@ -67,7 +67,7 @@ class FakeResponce(object):
|
|||||||
|
|
||||||
|
|
||||||
class NetappNfsDriverTestCase(test.TestCase):
|
class NetappNfsDriverTestCase(test.TestCase):
|
||||||
"""Test case for NetApp specific NFS clone driver"""
|
"""Test case for NetApp specific NFS clone driver."""
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
self._driver = netapp_nfs.NetAppNFSDriver()
|
self._driver = netapp_nfs.NetAppNFSDriver()
|
||||||
@@ -79,13 +79,11 @@ class NetappNfsDriverTestCase(test.TestCase):
def test_check_for_setup_error(self):
mox = self._mox
drv = self._driver
-required_flags = [
-'netapp_wsdl_url',
-'netapp_login',
-'netapp_password',
-'netapp_server_hostname',
-'netapp_server_port'
-]
+required_flags = ['netapp_wsdl_url',
+'netapp_login',
+'netapp_password',
+'netapp_server_hostname',
+'netapp_server_port']

# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
|
||||||
@ -124,7 +122,7 @@ class NetappNfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_create_snapshot(self):
|
def test_create_snapshot(self):
|
||||||
"""Test snapshot can be created and deleted"""
|
"""Test snapshot can be created and deleted."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -137,7 +135,7 @@ class NetappNfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_create_volume_from_snapshot(self):
|
def test_create_volume_from_snapshot(self):
|
||||||
"""Tests volume creation from snapshot"""
|
"""Tests volume creation from snapshot."""
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
volume = FakeVolume(1)
|
volume = FakeVolume(1)
|
||||||
@ -177,8 +175,8 @@ class NetappNfsDriverTestCase(test.TestCase):
|
|||||||
mox.StubOutWithMock(drv, '_get_volume_path')
|
mox.StubOutWithMock(drv, '_get_volume_path')
|
||||||
|
|
||||||
drv._get_provider_location(IgnoreArg())
|
drv._get_provider_location(IgnoreArg())
|
||||||
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
|
drv._volume_not_present(IgnoreArg(),
|
||||||
.AndReturn(not snapshot_exists)
|
IgnoreArg()).AndReturn(not snapshot_exists)
|
||||||
|
|
||||||
if snapshot_exists:
|
if snapshot_exists:
|
||||||
drv._get_volume_path(IgnoreArg(), IgnoreArg())
|
drv._get_volume_path(IgnoreArg(), IgnoreArg())
|
||||||
|
@ -113,26 +113,21 @@ class TestNexentaDriver(cinder.test.TestCase):
|
|||||||
('iscsitarget', 'create_target', ({'target_name': 'iqn:volume1'},),
|
('iscsitarget', 'create_target', ({'target_name': 'iqn:volume1'},),
|
||||||
u'Unable to create iscsi target\n'
|
u'Unable to create iscsi target\n'
|
||||||
u' iSCSI target iqn.1986-03.com.sun:02:cinder-volume1 already'
|
u' iSCSI target iqn.1986-03.com.sun:02:cinder-volume1 already'
|
||||||
u' configured\n'
|
u' configured\n'
|
||||||
u' itadm create-target failed with error 17\n',
|
u' itadm create-target failed with error 17\n', ),
|
||||||
),
|
|
||||||
('stmf', 'create_targetgroup', ('cinder/volume1',),
|
('stmf', 'create_targetgroup', ('cinder/volume1',),
|
||||||
u'Unable to create targetgroup: stmfadm: cinder/volume1:'
|
u'Unable to create targetgroup: stmfadm: cinder/volume1:'
|
||||||
u' already exists\n',
|
u' already exists\n', ),
|
||||||
),
|
|
||||||
('stmf', 'add_targetgroup_member', ('cinder/volume1', 'iqn:volume1'),
|
('stmf', 'add_targetgroup_member', ('cinder/volume1', 'iqn:volume1'),
|
||||||
u'Unable to add member to targetgroup: stmfadm:'
|
u'Unable to add member to targetgroup: stmfadm:'
|
||||||
u' iqn.1986-03.com.sun:02:cinder-volume1: already exists\n',
|
u' iqn.1986-03.com.sun:02:cinder-volume1: already exists\n', ),
|
||||||
),
|
|
||||||
('scsidisk', 'create_lu', ('cinder/volume1', {}),
|
('scsidisk', 'create_lu', ('cinder/volume1', {}),
|
||||||
u"Unable to create lu with zvol 'cinder/volume1':\n"
|
u"Unable to create lu with zvol 'cinder/volume1':\n"
|
||||||
u" sbdadm: filename /dev/zvol/rdsk/cinder/volume1: in use\n",
|
u" sbdadm: filename /dev/zvol/rdsk/cinder/volume1: in use\n", ),
|
||||||
),
|
|
||||||
('scsidisk', 'add_lun_mapping_entry', ('cinder/volume1', {
|
('scsidisk', 'add_lun_mapping_entry', ('cinder/volume1', {
|
||||||
'target_group': 'cinder/volume1', 'lun': '0'}),
|
'target_group': 'cinder/volume1', 'lun': '0'}),
|
||||||
u"Unable to add view to zvol 'cinder/volume1' (LUNs in use: ):\n"
|
u"Unable to add view to zvol 'cinder/volume1' (LUNs in use: ):\n"
|
||||||
u" stmfadm: view entry exists\n",
|
u" stmfadm: view entry exists\n", ),
|
||||||
),
|
|
||||||
]
|
]
|
||||||
|
|
||||||
def _stub_export_method(self, module, method, args, error, fail=False):
|
def _stub_export_method(self, module, method, args, error, fail=False):
|
||||||
@ -150,7 +145,8 @@ class TestNexentaDriver(cinder.test.TestCase):
|
|||||||
self._stub_all_export_methods()
|
self._stub_all_export_methods()
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
retval = self.drv.create_export({}, self.TEST_VOLUME_REF)
|
retval = self.drv.create_export({}, self.TEST_VOLUME_REF)
|
||||||
self.assertEquals(retval,
|
self.assertEquals(
|
||||||
|
retval,
|
||||||
{'provider_location':
|
{'provider_location':
|
||||||
'%s:%s,1 %s%s' % (FLAGS.nexenta_host,
|
'%s:%s,1 %s%s' % (FLAGS.nexenta_host,
|
||||||
FLAGS.nexenta_iscsi_target_portal_port,
|
FLAGS.nexenta_iscsi_target_portal_port,
|
||||||
@ -165,7 +161,9 @@ class TestNexentaDriver(cinder.test.TestCase):
|
|||||||
fail=True)
|
fail=True)
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
self.assertRaises(nexenta.NexentaException,
|
self.assertRaises(nexenta.NexentaException,
|
||||||
self.drv.create_export, {}, self.TEST_VOLUME_REF)
|
self.drv.create_export,
|
||||||
|
{},
|
||||||
|
self.TEST_VOLUME_REF)
|
||||||
return _test_create_export_fail
|
return _test_create_export_fail
|
||||||
|
|
||||||
for i in range(len(_CREATE_EXPORT_METHODS)):
|
for i in range(len(_CREATE_EXPORT_METHODS)):
|
||||||
@ -185,8 +183,8 @@ class TestNexentaDriver(cinder.test.TestCase):
|
|||||||
|
|
||||||
def test_remove_export_fail_0(self):
|
def test_remove_export_fail_0(self):
|
||||||
self.nms_mock.scsidisk.delete_lu('cinder/volume1')
|
self.nms_mock.scsidisk.delete_lu('cinder/volume1')
|
||||||
self.nms_mock.stmf.destroy_targetgroup('cinder/volume1').AndRaise(
|
self.nms_mock.stmf.destroy_targetgroup(
|
||||||
nexenta.NexentaException())
|
'cinder/volume1').AndRaise(nexenta.NexentaException())
|
||||||
self.nms_mock.iscsitarget.delete_target('iqn:volume1')
|
self.nms_mock.iscsitarget.delete_target('iqn:volume1')
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
self.drv.remove_export({}, self.TEST_VOLUME_REF)
|
self.drv.remove_export({}, self.TEST_VOLUME_REF)
|
||||||
@ -194,8 +192,8 @@ class TestNexentaDriver(cinder.test.TestCase):
|
|||||||
def test_remove_export_fail_1(self):
|
def test_remove_export_fail_1(self):
|
||||||
self.nms_mock.scsidisk.delete_lu('cinder/volume1')
|
self.nms_mock.scsidisk.delete_lu('cinder/volume1')
|
||||||
self.nms_mock.stmf.destroy_targetgroup('cinder/volume1')
|
self.nms_mock.stmf.destroy_targetgroup('cinder/volume1')
|
||||||
self.nms_mock.iscsitarget.delete_target('iqn:volume1').AndRaise(
|
self.nms_mock.iscsitarget.delete_target(
|
||||||
nexenta.NexentaException())
|
'iqn:volume1').AndRaise(nexenta.NexentaException())
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
self.drv.remove_export({}, self.TEST_VOLUME_REF)
|
self.drv.remove_export({}, self.TEST_VOLUME_REF)
|
||||||
|
|
||||||
@ -205,9 +203,9 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
|
|||||||
URL_S = 'https://example.com/'
|
URL_S = 'https://example.com/'
|
||||||
USER = 'user'
|
USER = 'user'
|
||||||
PASSWORD = 'password'
|
PASSWORD = 'password'
|
||||||
HEADERS = {'Authorization': 'Basic %s' % (base64.b64encode(
|
HEADERS = {'Authorization': 'Basic %s' % (
|
||||||
':'.join((USER, PASSWORD))),),
|
base64.b64encode(':'.join((USER, PASSWORD))),),
|
||||||
'Content-Type': 'application/json'}
|
'Content-Type': 'application/json'}
|
||||||
REQUEST = 'the request'
|
REQUEST = 'the request'
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
@ -222,21 +220,23 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
|
|||||||
urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock)
|
urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock)
|
||||||
|
|
||||||
def test_call(self):
|
def test_call(self):
|
||||||
urllib2.Request(self.URL,
|
urllib2.Request(
|
||||||
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
|
self.URL,
|
||||||
self.HEADERS).AndReturn(self.REQUEST)
|
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
|
||||||
|
self.HEADERS).AndReturn(self.REQUEST)
|
||||||
self.resp_info_mock.status = ''
|
self.resp_info_mock.status = ''
|
||||||
self.resp_mock.read().AndReturn(
|
self.resp_mock.read().AndReturn(
|
||||||
'{"error": null, "result": "the result"}')
|
'{"error": null, "result": "the result"}')
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
result = self.proxy('arg1', 'arg2')
|
result = self.proxy('arg1', 'arg2')
|
||||||
self.assertEquals("the result", result)
|
self.assertEquals("the result", result)
|
||||||
|
|
||||||
def test_call_deep(self):
|
def test_call_deep(self):
|
||||||
urllib2.Request(self.URL,
|
urllib2.Request(
|
||||||
'{"object": "obj1.subobj", "params": ["arg1", "arg2"],'
|
self.URL,
|
||||||
' "method": "meth"}',
|
'{"object": "obj1.subobj", "params": ["arg1", "arg2"],'
|
||||||
self.HEADERS).AndReturn(self.REQUEST)
|
' "method": "meth"}',
|
||||||
|
self.HEADERS).AndReturn(self.REQUEST)
|
||||||
self.resp_info_mock.status = ''
|
self.resp_info_mock.status = ''
|
||||||
self.resp_mock.read().AndReturn(
|
self.resp_mock.read().AndReturn(
|
||||||
'{"error": null, "result": "the result"}')
|
'{"error": null, "result": "the result"}')
|
||||||
@ -245,12 +245,14 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
|
|||||||
self.assertEquals("the result", result)
|
self.assertEquals("the result", result)
|
||||||
|
|
||||||
def test_call_auto(self):
|
def test_call_auto(self):
|
||||||
urllib2.Request(self.URL,
|
urllib2.Request(
|
||||||
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
|
self.URL,
|
||||||
self.HEADERS).AndReturn(self.REQUEST)
|
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
|
||||||
urllib2.Request(self.URL_S,
|
self.HEADERS).AndReturn(self.REQUEST)
|
||||||
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
|
urllib2.Request(
|
||||||
self.HEADERS).AndReturn(self.REQUEST)
|
self.URL_S,
|
||||||
|
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
|
||||||
|
self.HEADERS).AndReturn(self.REQUEST)
|
||||||
self.resp_info_mock.status = 'EOF in headers'
|
self.resp_info_mock.status = 'EOF in headers'
|
||||||
self.resp_mock.read().AndReturn(
|
self.resp_mock.read().AndReturn(
|
||||||
'{"error": null, "result": "the result"}')
|
'{"error": null, "result": "the result"}')
|
||||||
@ -260,9 +262,10 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
|
|||||||
self.assertEquals("the result", result)
|
self.assertEquals("the result", result)
|
||||||
|
|
||||||
def test_call_error(self):
|
def test_call_error(self):
|
||||||
urllib2.Request(self.URL,
|
urllib2.Request(
|
||||||
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
|
self.URL,
|
||||||
self.HEADERS).AndReturn(self.REQUEST)
|
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
|
||||||
|
self.HEADERS).AndReturn(self.REQUEST)
|
||||||
self.resp_info_mock.status = ''
|
self.resp_info_mock.status = ''
|
||||||
self.resp_mock.read().AndReturn(
|
self.resp_mock.read().AndReturn(
|
||||||
'{"error": {"message": "the error"}, "result": "the result"}')
|
'{"error": {"message": "the error"}, "result": "the result"}')
|
||||||
@ -271,9 +274,10 @@ class TestNexentaJSONRPC(cinder.test.TestCase):
|
|||||||
self.proxy, 'arg1', 'arg2')
|
self.proxy, 'arg1', 'arg2')
|
||||||
|
|
||||||
def test_call_fail(self):
|
def test_call_fail(self):
|
||||||
urllib2.Request(self.URL,
|
urllib2.Request(
|
||||||
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
|
self.URL,
|
||||||
self.HEADERS).AndReturn(self.REQUEST)
|
'{"object": null, "params": ["arg1", "arg2"], "method": null}',
|
||||||
|
self.HEADERS).AndReturn(self.REQUEST)
|
||||||
self.resp_info_mock.status = 'EOF in headers'
|
self.resp_info_mock.status = 'EOF in headers'
|
||||||
self.proxy.auto = False
|
self.proxy.auto = False
|
||||||
self.mox.ReplayAll()
|
self.mox.ReplayAll()
|
||||||
|
@ -14,7 +14,7 @@
|
|||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
"""Unit tests for the NFS driver module"""
|
"""Unit tests for the NFS driver module."""
|
||||||
|
|
||||||
import __builtin__
|
import __builtin__
|
||||||
import errno
|
import errno
|
||||||
@ -44,7 +44,7 @@ class DumbVolume(object):
|
|||||||
|
|
||||||
|
|
||||||
class NfsDriverTestCase(test.TestCase):
|
class NfsDriverTestCase(test.TestCase):
|
||||||
"""Test case for NFS driver"""
|
"""Test case for NFS driver."""
|
||||||
|
|
||||||
TEST_NFS_EXPORT1 = 'nfs-host1:/export'
|
TEST_NFS_EXPORT1 = 'nfs-host1:/export'
|
||||||
TEST_NFS_EXPORT2 = 'nfs-host2:/export'
|
TEST_NFS_EXPORT2 = 'nfs-host2:/export'
|
||||||
@ -71,7 +71,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
self.stubs.Set(obj, attr_name, stub)
|
self.stubs.Set(obj, attr_name, stub)
|
||||||
|
|
||||||
def test_path_exists_should_return_true(self):
|
def test_path_exists_should_return_true(self):
|
||||||
"""_path_exists should return True if stat returns 0"""
|
"""_path_exists should return True if stat returns 0."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -85,14 +85,17 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_path_exists_should_return_false(self):
|
def test_path_exists_should_return_false(self):
|
||||||
"""_path_exists should return True if stat doesn't return 0"""
|
"""_path_exists should return True if stat doesn't return 0."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
mox.StubOutWithMock(drv, '_execute')
|
mox.StubOutWithMock(drv, '_execute')
|
||||||
drv._execute('stat', self.TEST_FILE_NAME, run_as_root=True).\
|
drv._execute(
|
||||||
|
'stat',
|
||||||
|
self.TEST_FILE_NAME, run_as_root=True).\
|
||||||
AndRaise(ProcessExecutionError(
|
AndRaise(ProcessExecutionError(
|
||||||
stderr="stat: cannot stat `test.txt': No such file or directory"))
|
stderr="stat: cannot stat `test.txt': No such file "
|
||||||
|
"or directory"))
|
||||||
|
|
||||||
mox.ReplayAll()
|
mox.ReplayAll()
|
||||||
|
|
||||||
@ -101,7 +104,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_local_path(self):
|
def test_local_path(self):
|
||||||
"""local_path common use case"""
|
"""local_path common use case."""
|
||||||
nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
|
nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -114,7 +117,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
drv.local_path(volume))
|
drv.local_path(volume))
|
||||||
|
|
||||||
def test_mount_nfs_should_mount_correctly(self):
|
def test_mount_nfs_should_mount_correctly(self):
|
||||||
"""_mount_nfs common case usage"""
|
"""_mount_nfs common case usage."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -144,7 +147,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
|
drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
|
||||||
self.TEST_MNT_POINT, run_as_root=True).\
|
self.TEST_MNT_POINT, run_as_root=True).\
|
||||||
AndRaise(ProcessExecutionError(
|
AndRaise(ProcessExecutionError(
|
||||||
stderr='is busy or already mounted'))
|
stderr='is busy or already mounted'))
|
||||||
|
|
||||||
mox.ReplayAll()
|
mox.ReplayAll()
|
||||||
|
|
||||||
@ -162,9 +165,13 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
drv._path_exists(self.TEST_MNT_POINT).AndReturn(True)
|
drv._path_exists(self.TEST_MNT_POINT).AndReturn(True)
|
||||||
|
|
||||||
mox.StubOutWithMock(drv, '_execute')
|
mox.StubOutWithMock(drv, '_execute')
|
||||||
drv._execute('mount', '-t', 'nfs', self.TEST_NFS_EXPORT1,
|
drv._execute(
|
||||||
self.TEST_MNT_POINT, run_as_root=True).\
|
'mount',
|
||||||
AndRaise(ProcessExecutionError(stderr='is busy or already mounted'))
|
'-t',
|
||||||
|
'nfs',
|
||||||
|
self.TEST_NFS_EXPORT1, self.TEST_MNT_POINT, run_as_root=True).\
|
||||||
|
AndRaise(ProcessExecutionError(stderr='is busy or '
|
||||||
|
'already mounted'))
|
||||||
|
|
||||||
mox.ReplayAll()
|
mox.ReplayAll()
|
||||||
|
|
||||||
@ -175,7 +182,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_mount_nfs_should_create_mountpoint_if_not_yet(self):
|
def test_mount_nfs_should_create_mountpoint_if_not_yet(self):
|
||||||
"""_mount_nfs should create mountpoint if it doesn't exist"""
|
"""_mount_nfs should create mountpoint if it doesn't exist."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -193,7 +200,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_mount_nfs_should_not_create_mountpoint_if_already(self):
|
def test_mount_nfs_should_not_create_mountpoint_if_already(self):
|
||||||
"""_mount_nfs should not create mountpoint if it already exists"""
|
"""_mount_nfs should not create mountpoint if it already exists."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -210,14 +217,14 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_get_hash_str(self):
|
def test_get_hash_str(self):
|
||||||
"""_get_hash_str should calculation correct value"""
|
"""_get_hash_str should calculation correct value."""
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
self.assertEqual('2f4f60214cf43c595666dd815f0360a4',
|
self.assertEqual('2f4f60214cf43c595666dd815f0360a4',
|
||||||
drv._get_hash_str(self.TEST_NFS_EXPORT1))
|
drv._get_hash_str(self.TEST_NFS_EXPORT1))
|
||||||
|
|
||||||
def test_get_mount_point_for_share(self):
|
def test_get_mount_point_for_share(self):
|
||||||
"""_get_mount_point_for_share should calculate correct value"""
|
"""_get_mount_point_for_share should calculate correct value."""
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
|
nfs.FLAGS.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
|
||||||
@ -226,7 +233,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
|
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
|
||||||
|
|
||||||
def test_get_available_capacity_with_df(self):
|
def test_get_available_capacity_with_df(self):
|
||||||
"""_get_available_capacity should calculate correct value"""
|
"""_get_available_capacity should calculate correct value."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -255,7 +262,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
delattr(nfs.FLAGS, 'nfs_disk_util')
|
delattr(nfs.FLAGS, 'nfs_disk_util')
|
||||||
|
|
||||||
def test_get_available_capacity_with_du(self):
|
def test_get_available_capacity_with_du(self):
|
||||||
"""_get_available_capacity should calculate correct value"""
|
"""_get_available_capacity should calculate correct value."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -316,7 +323,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_ensure_share_mounted(self):
|
def test_ensure_share_mounted(self):
|
||||||
"""_ensure_share_mounted simple use case"""
|
"""_ensure_share_mounted simple use case."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -334,7 +341,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_ensure_shares_mounted_should_save_mounting_successfully(self):
|
def test_ensure_shares_mounted_should_save_mounting_successfully(self):
|
||||||
"""_ensure_shares_mounted should save share if mounted with success"""
|
"""_ensure_shares_mounted should save share if mounted with success."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -353,7 +360,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_ensure_shares_mounted_should_not_save_mounting_with_error(self):
|
def test_ensure_shares_mounted_should_not_save_mounting_with_error(self):
|
||||||
"""_ensure_shares_mounted should not save share if failed to mount"""
|
"""_ensure_shares_mounted should not save share if failed to mount."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -371,7 +378,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_setup_should_throw_error_if_shares_config_not_configured(self):
|
def test_setup_should_throw_error_if_shares_config_not_configured(self):
|
||||||
"""do_setup should throw error if shares config is not configured """
|
"""do_setup should throw error if shares config is not configured."""
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
nfs.FLAGS.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
|
nfs.FLAGS.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
|
||||||
@ -380,7 +387,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
drv.do_setup, IsA(context.RequestContext))
|
drv.do_setup, IsA(context.RequestContext))
|
||||||
|
|
||||||
def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
|
def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
|
||||||
"""do_setup should throw error if nfs client is not installed """
|
"""do_setup should throw error if nfs client is not installed."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -400,7 +407,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self):
|
def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self):
|
||||||
"""_find_share should throw error if there is no mounted shares"""
|
"""_find_share should throw error if there is no mounted shares."""
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
drv._mounted_shares = []
|
drv._mounted_shares = []
|
||||||
@ -409,7 +416,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
self.TEST_SIZE_IN_GB)
|
self.TEST_SIZE_IN_GB)
|
||||||
|
|
||||||
def test_find_share(self):
|
def test_find_share(self):
|
||||||
"""_find_share simple use case"""
|
"""_find_share simple use case."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -429,7 +436,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_find_share_should_throw_error_if_there_is_no_enough_place(self):
|
def test_find_share_should_throw_error_if_there_is_no_enough_place(self):
|
||||||
"""_find_share should throw error if there is no share to host vol"""
|
"""_find_share should throw error if there is no share to host vol."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -499,7 +506,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
delattr(nfs.FLAGS, 'nfs_sparsed_volumes')
|
delattr(nfs.FLAGS, 'nfs_sparsed_volumes')
|
||||||
|
|
||||||
def test_create_volume_should_ensure_nfs_mounted(self):
|
def test_create_volume_should_ensure_nfs_mounted(self):
|
||||||
"""create_volume should ensure shares provided in config are mounted"""
|
"""create_volume ensures shares provided in config are mounted."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -519,7 +526,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_create_volume_should_return_provider_location(self):
|
def test_create_volume_should_return_provider_location(self):
|
||||||
"""create_volume should return provider_location with found share """
|
"""create_volume should return provider_location with found share."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -540,7 +547,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_delete_volume(self):
|
def test_delete_volume(self):
|
||||||
"""delete_volume simple test case"""
|
"""delete_volume simple test case."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -566,7 +573,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_delete_should_ensure_share_mounted(self):
|
def test_delete_should_ensure_share_mounted(self):
|
||||||
"""delete_volume should ensure that corresponding share is mounted"""
|
"""delete_volume should ensure that corresponding share is mounted."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -586,7 +593,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_delete_should_not_delete_if_provider_location_not_provided(self):
|
def test_delete_should_not_delete_if_provider_location_not_provided(self):
|
||||||
"""delete_volume shouldn't try to delete if provider_location missed"""
|
"""delete_volume shouldn't delete if provider_location missed."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
@ -605,7 +612,7 @@ class NfsDriverTestCase(test.TestCase):
|
|||||||
mox.VerifyAll()
|
mox.VerifyAll()
|
||||||
|
|
||||||
def test_delete_should_not_delete_if_there_is_no_file(self):
|
def test_delete_should_not_delete_if_there_is_no_file(self):
|
||||||
"""delete_volume should not try to delete if file missed"""
|
"""delete_volume should not try to delete if file missed."""
|
||||||
mox = self._mox
|
mox = self._mox
|
||||||
drv = self._driver
|
drv = self._driver
|
||||||
|
|
||||||
|
@ -15,7 +15,7 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
"""Test of Policy Engine For Cinder"""
|
"""Test of Policy Engine For Cinder."""
|
||||||
|
|
||||||
import os.path
|
import os.path
|
||||||
import StringIO
|
import StringIO
|
||||||
@ -147,8 +147,8 @@ class PolicyTestCase(test.TestCase):
|
|||||||
# NOTE(dprince) we mix case in the Admin role here to ensure
|
# NOTE(dprince) we mix case in the Admin role here to ensure
|
||||||
# case is ignored
|
# case is ignored
|
||||||
admin_context = context.RequestContext('admin',
|
admin_context = context.RequestContext('admin',
|
||||||
'fake',
|
'fake',
|
||||||
roles=['AdMiN'])
|
roles=['AdMiN'])
|
||||||
policy.enforce(admin_context, lowercase_action, self.target)
|
policy.enforce(admin_context, lowercase_action, self.target)
|
||||||
policy.enforce(admin_context, uppercase_action, self.target)
|
policy.enforce(admin_context, uppercase_action, self.target)
|
||||||
|
|
||||||
@ -180,7 +180,7 @@ class DefaultPolicyTestCase(test.TestCase):
|
|||||||
|
|
||||||
def test_policy_called(self):
|
def test_policy_called(self):
|
||||||
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
|
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
|
||||||
self.context, "example:exist", {})
|
self.context, "example:exist", {})
|
||||||
|
|
||||||
def test_not_found_policy_calls_default(self):
|
def test_not_found_policy_calls_default(self):
|
||||||
policy.enforce(self.context, "example:noexist", {})
|
policy.enforce(self.context, "example:noexist", {})
|
||||||
@ -188,7 +188,7 @@ class DefaultPolicyTestCase(test.TestCase):
|
|||||||
def test_default_not_found(self):
|
def test_default_not_found(self):
|
||||||
self._set_brain("default_noexist")
|
self._set_brain("default_noexist")
|
||||||
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
|
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
|
||||||
self.context, "example:noexist", {})
|
self.context, "example:noexist", {})
|
||||||
|
|
||||||
|
|
||||||
class ContextIsAdminPolicyTestCase(test.TestCase):
|
class ContextIsAdminPolicyTestCase(test.TestCase):
|
||||||
|
File diff suppressed because it is too large
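The suppressed file above presumably receives the same kind of mechanical re-wrapping as the rest of this change. As a minimal, purely illustrative sketch (not taken from the suppressed file; the list values are borrowed from the visible RBD test hunk just below), the pattern is to re-align continuation lines with the opening delimiter so the stricter continuation-line checks pass:

    # Illustrative sketch only -- hypothetical snippet, not code from the
    # suppressed file. Values are borrowed from the RBD test hunk below.

    # Before: the continuation line is not aligned with the opening bracket,
    # so the stricter continuation-line indentation checks flag it.
    locations = ['rbd://fsid/pool/image/snap',
        'rbd://%2F/%2F/%2F/%2F']

    # After: the continuation line is aligned with the opening delimiter.
    locations = ['rbd://fsid/pool/image/snap',
                 'rbd://%2F/%2F/%2F/%2F']

    print(locations)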
@ -46,22 +46,18 @@ class RBDTestCase(test.TestCase):
|
|||||||
self.driver = RBDDriver(execute=fake_execute)
|
self.driver = RBDDriver(execute=fake_execute)
|
||||||
|
|
||||||
def test_good_locations(self):
|
def test_good_locations(self):
|
||||||
locations = [
|
locations = ['rbd://fsid/pool/image/snap',
|
||||||
'rbd://fsid/pool/image/snap',
|
'rbd://%2F/%2F/%2F/%2F', ]
|
||||||
'rbd://%2F/%2F/%2F/%2F',
|
|
||||||
]
|
|
||||||
map(self.driver._parse_location, locations)
|
map(self.driver._parse_location, locations)
|
||||||
|
|
||||||
def test_bad_locations(self):
|
def test_bad_locations(self):
|
||||||
locations = [
|
locations = ['rbd://image',
|
||||||
'rbd://image',
|
'http://path/to/somewhere/else',
|
||||||
'http://path/to/somewhere/else',
|
'rbd://image/extra',
|
||||||
'rbd://image/extra',
|
'rbd://image/',
|
||||||
'rbd://image/',
|
'rbd://fsid/pool/image/',
|
||||||
'rbd://fsid/pool/image/',
|
'rbd://fsid/pool/image/snap/',
|
||||||
'rbd://fsid/pool/image/snap/',
|
'rbd://///', ]
|
||||||
'rbd://///',
|
|
||||||
]
|
|
||||||
for loc in locations:
|
for loc in locations:
|
||||||
self.assertRaises(exception.ImageUnacceptable,
|
self.assertRaises(exception.ImageUnacceptable,
|
||||||
self.driver._parse_location,
|
self.driver._parse_location,
|
||||||
@ -142,13 +138,14 @@ class ManagedRBDTestCase(DriverTestCase):
|
|||||||
image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
|
image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
|
||||||
volume_id = 1
|
volume_id = 1
|
||||||
# creating volume testdata
|
# creating volume testdata
|
||||||
db.volume_create(self.context, {'id': volume_id,
|
db.volume_create(self.context,
|
||||||
'updated_at': timeutils.utcnow(),
|
{'id': volume_id,
|
||||||
'display_description': 'Test Desc',
|
'updated_at': timeutils.utcnow(),
|
||||||
'size': 20,
|
'display_description': 'Test Desc',
|
||||||
'status': 'creating',
|
'size': 20,
|
||||||
'instance_uuid': None,
|
'status': 'creating',
|
||||||
'host': 'dummy'})
|
'instance_uuid': None,
|
||||||
|
'host': 'dummy'})
|
||||||
try:
|
try:
|
||||||
if clone_works:
|
if clone_works:
|
||||||
self.volume.create_volume(self.context,
|
self.volume.create_volume(self.context,
|
||||||
|
@ -42,8 +42,7 @@ test_service_opts = [
|
|||||||
help="Host to bind test service to"),
|
help="Host to bind test service to"),
|
||||||
cfg.IntOpt("test_service_listen_port",
|
cfg.IntOpt("test_service_listen_port",
|
||||||
default=0,
|
default=0,
|
||||||
help="Port number to bind test service to"),
|
help="Port number to bind test service to"), ]
|
||||||
]
|
|
||||||
|
|
||||||
flags.FLAGS.register_opts(test_service_opts)
|
flags.FLAGS.register_opts(test_service_opts)
|
||||||
|
|
||||||
@ -131,15 +130,15 @@ class ServiceTestCase(test.TestCase):
|
|||||||
'report_count': 0,
|
'report_count': 0,
|
||||||
'availability_zone': 'nova'}
|
'availability_zone': 'nova'}
|
||||||
service_ref = {'host': host,
|
service_ref = {'host': host,
|
||||||
'binary': binary,
|
'binary': binary,
|
||||||
'topic': topic,
|
'topic': topic,
|
||||||
'report_count': 0,
|
'report_count': 0,
|
||||||
'availability_zone': 'nova',
|
'availability_zone': 'nova',
|
||||||
'id': 1}
|
'id': 1}
|
||||||
|
|
||||||
service.db.service_get_by_args(mox.IgnoreArg(),
|
service.db.service_get_by_args(mox.IgnoreArg(),
|
||||||
host,
|
host,
|
||||||
binary).AndRaise(exception.NotFound())
|
binary).AndRaise(exception.NotFound())
|
||||||
service.db.service_create(mox.IgnoreArg(),
|
service.db.service_create(mox.IgnoreArg(),
|
||||||
service_create).AndReturn(service_ref)
|
service_create).AndReturn(service_ref)
|
||||||
service.db.service_get(mox.IgnoreArg(),
|
service.db.service_get(mox.IgnoreArg(),
|
||||||
@ -164,15 +163,15 @@ class ServiceTestCase(test.TestCase):
|
|||||||
'report_count': 0,
|
'report_count': 0,
|
||||||
'availability_zone': 'nova'}
|
'availability_zone': 'nova'}
|
||||||
service_ref = {'host': host,
|
service_ref = {'host': host,
|
||||||
'binary': binary,
|
'binary': binary,
|
||||||
'topic': topic,
|
'topic': topic,
|
||||||
'report_count': 0,
|
'report_count': 0,
|
||||||
'availability_zone': 'nova',
|
'availability_zone': 'nova',
|
||||||
'id': 1}
|
'id': 1}
|
||||||
|
|
||||||
service.db.service_get_by_args(mox.IgnoreArg(),
|
service.db.service_get_by_args(mox.IgnoreArg(),
|
||||||
host,
|
host,
|
||||||
binary).AndRaise(exception.NotFound())
|
binary).AndRaise(exception.NotFound())
|
||||||
service.db.service_create(mox.IgnoreArg(),
|
service.db.service_create(mox.IgnoreArg(),
|
||||||
service_create).AndReturn(service_ref)
|
service_create).AndReturn(service_ref)
|
||||||
service.db.service_get(mox.IgnoreArg(),
|
service.db.service_get(mox.IgnoreArg(),
|
||||||
|
@ -472,8 +472,9 @@ class StorwizeSVCManagementSimulator:
|
|||||||
rows.append(["IO_group_name", "io_grp0"])
|
rows.append(["IO_group_name", "io_grp0"])
|
||||||
rows.append(["status", "online"])
|
rows.append(["status", "online"])
|
||||||
rows.append(["mdisk_grp_id", "0"])
|
rows.append(["mdisk_grp_id", "0"])
|
||||||
rows.append(["mdisk_grp_name",
|
rows.append([
|
||||||
self._flags["storwize_svc_volpool_name"]])
|
"mdisk_grp_name",
|
||||||
|
self._flags["storwize_svc_volpool_name"]])
|
||||||
rows.append(["capacity", cap])
|
rows.append(["capacity", cap])
|
||||||
rows.append(["type", "striped"])
|
rows.append(["type", "striped"])
|
||||||
rows.append(["formatted", "no"])
|
rows.append(["formatted", "no"])
|
||||||
@ -900,14 +901,14 @@ class StorwizeSVCFakeDriver(storwize_svc.StorwizeSVCDriver):
|
|||||||
LOG.debug(_('Run CLI command: %s') % cmd)
|
LOG.debug(_('Run CLI command: %s') % cmd)
|
||||||
ret = self.fake_storage.execute_command(cmd, check_exit_code)
|
ret = self.fake_storage.execute_command(cmd, check_exit_code)
|
||||||
(stdout, stderr) = ret
|
(stdout, stderr) = ret
|
||||||
LOG.debug(_('CLI output:\n stdout: %(out)s\n stderr: %(err)s') %
|
LOG.debug(_('CLI output:\n stdout: %(out)s\n stderr: %(err)s') % {
|
||||||
{'out': stdout, 'err': stderr})
|
'out': stdout, 'err': stderr})
|
||||||
|
|
||||||
except exception.ProcessExecutionError as e:
|
except exception.ProcessExecutionError as e:
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.debug(_('CLI Exception output:\n stdout: %(out)s\n '
|
LOG.debug(_('CLI Exception output:\n stdout: %(out)s\n '
|
||||||
'stderr: %(err)s') % {'out': e.stdout,
|
'stderr: %(err)s') % {'out': e.stdout,
|
||||||
'err': e.stderr})
|
'err': e.stderr})
|
||||||
|
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
@ -964,25 +965,25 @@ class StorwizeSVCDriverTestCase(test.TestCase):
|
|||||||
# Check for missing san_ip
|
# Check for missing san_ip
|
||||||
self.flags(san_ip=None)
|
self.flags(san_ip=None)
|
||||||
self.assertRaises(exception.InvalidInput,
|
self.assertRaises(exception.InvalidInput,
|
||||||
self.driver._check_flags)
|
self.driver._check_flags)
|
||||||
|
|
||||||
if self.USESIM != 1:
|
if self.USESIM != 1:
|
||||||
# Check for invalid ip
|
# Check for invalid ip
|
||||||
self.flags(san_ip="-1.-1.-1.-1")
|
self.flags(san_ip="-1.-1.-1.-1")
|
||||||
self.assertRaises(socket.gaierror,
|
self.assertRaises(socket.gaierror,
|
||||||
self.driver.check_for_setup_error)
|
self.driver.check_for_setup_error)
|
||||||
|
|
||||||
# Check for unreachable IP
|
# Check for unreachable IP
|
||||||
self.flags(san_ip="1.1.1.1")
|
self.flags(san_ip="1.1.1.1")
|
||||||
self.assertRaises(socket.error,
|
self.assertRaises(socket.error,
|
||||||
self.driver.check_for_setup_error)
|
self.driver.check_for_setup_error)
|
||||||
|
|
||||||
def test_storwize_svc_connectivity(self):
|
def test_storwize_svc_connectivity(self):
|
||||||
# Make sure we detect if the pool doesn't exist
|
# Make sure we detect if the pool doesn't exist
|
||||||
no_exist_pool = "i-dont-exist-%s" % random.randint(10000, 99999)
|
no_exist_pool = "i-dont-exist-%s" % random.randint(10000, 99999)
|
||||||
self.flags(storwize_svc_volpool_name=no_exist_pool)
|
self.flags(storwize_svc_volpool_name=no_exist_pool)
|
||||||
self.assertRaises(exception.InvalidInput,
|
self.assertRaises(exception.InvalidInput,
|
||||||
self.driver.check_for_setup_error)
|
self.driver.check_for_setup_error)
|
||||||
FLAGS.reset()
|
FLAGS.reset()
|
||||||
|
|
||||||
# Check the case where the user didn't configure IP addresses
|
# Check the case where the user didn't configure IP addresses
|
||||||
@ -990,56 +991,56 @@ class StorwizeSVCDriverTestCase(test.TestCase):
|
|||||||
if self.USESIM == 1:
|
if self.USESIM == 1:
|
||||||
self.sim.error_injection("lsnodecanister", "header_mismatch")
|
self.sim.error_injection("lsnodecanister", "header_mismatch")
|
||||||
self.assertRaises(exception.VolumeBackendAPIException,
|
self.assertRaises(exception.VolumeBackendAPIException,
|
||||||
self.driver.check_for_setup_error)
|
self.driver.check_for_setup_error)
|
||||||
self.sim.error_injection("lsnodecanister", "remove_field")
|
self.sim.error_injection("lsnodecanister", "remove_field")
|
||||||
self.assertRaises(exception.VolumeBackendAPIException,
|
self.assertRaises(exception.VolumeBackendAPIException,
|
||||||
self.driver.check_for_setup_error)
|
self.driver.check_for_setup_error)
|
||||||
self.sim.error_injection("lsportip", "ip_no_config")
|
self.sim.error_injection("lsportip", "ip_no_config")
|
||||||
self.assertRaises(exception.VolumeBackendAPIException,
|
self.assertRaises(exception.VolumeBackendAPIException,
|
||||||
self.driver.check_for_setup_error)
|
self.driver.check_for_setup_error)
|
||||||
self.sim.error_injection("lsportip", "header_mismatch")
|
self.sim.error_injection("lsportip", "header_mismatch")
|
||||||
self.assertRaises(exception.VolumeBackendAPIException,
|
self.assertRaises(exception.VolumeBackendAPIException,
|
||||||
self.driver.check_for_setup_error)
|
self.driver.check_for_setup_error)
|
||||||
self.sim.error_injection("lsportip", "remove_field")
|
self.sim.error_injection("lsportip", "remove_field")
|
||||||
self.assertRaises(exception.VolumeBackendAPIException,
|
self.assertRaises(exception.VolumeBackendAPIException,
|
||||||
self.driver.check_for_setup_error)
|
self.driver.check_for_setup_error)
|
||||||
|
|
||||||
# Check with bad parameters
|
# Check with bad parameters
|
||||||
self.flags(san_password=None)
|
self.flags(san_password=None)
|
||||||
self.flags(san_private_key=None)
|
self.flags(san_private_key=None)
|
||||||
self.assertRaises(exception.InvalidInput,
|
self.assertRaises(exception.InvalidInput,
|
||||||
self.driver._check_flags)
|
self.driver._check_flags)
|
||||||
FLAGS.reset()
|
FLAGS.reset()
|
||||||
|
|
||||||
self.flags(storwize_svc_vol_rsize="invalid")
|
self.flags(storwize_svc_vol_rsize="invalid")
|
||||||
self.assertRaises(exception.InvalidInput,
|
self.assertRaises(exception.InvalidInput,
|
||||||
self.driver._check_flags)
|
self.driver._check_flags)
|
||||||
FLAGS.reset()
|
FLAGS.reset()
|
||||||
|
|
||||||
self.flags(storwize_svc_vol_warning="invalid")
|
self.flags(storwize_svc_vol_warning="invalid")
|
||||||
self.assertRaises(exception.InvalidInput,
|
self.assertRaises(exception.InvalidInput,
|
||||||
self.driver._check_flags)
|
self.driver._check_flags)
|
||||||
FLAGS.reset()
|
FLAGS.reset()
|
||||||
|
|
||||||
self.flags(storwize_svc_vol_autoexpand="invalid")
|
self.flags(storwize_svc_vol_autoexpand="invalid")
|
||||||
self.assertRaises(exception.InvalidInput,
|
self.assertRaises(exception.InvalidInput,
|
||||||
self.driver._check_flags)
|
self.driver._check_flags)
|
||||||
FLAGS.reset()
|
FLAGS.reset()
|
||||||
|
|
||||||
self.flags(storwize_svc_vol_grainsize=str(42))
|
self.flags(storwize_svc_vol_grainsize=str(42))
|
||||||
self.assertRaises(exception.InvalidInput,
|
self.assertRaises(exception.InvalidInput,
|
||||||
self.driver._check_flags)
|
self.driver._check_flags)
|
||||||
FLAGS.reset()
|
FLAGS.reset()
|
||||||
|
|
||||||
self.flags(storwize_svc_flashcopy_timeout=str(601))
|
self.flags(storwize_svc_flashcopy_timeout=str(601))
|
||||||
self.assertRaises(exception.InvalidInput,
|
self.assertRaises(exception.InvalidInput,
|
||||||
self.driver._check_flags)
|
self.driver._check_flags)
|
||||||
FLAGS.reset()
|
FLAGS.reset()
|
||||||
|
|
||||||
self.flags(storwize_svc_vol_compression=True)
|
self.flags(storwize_svc_vol_compression=True)
|
||||||
self.flags(storwize_svc_vol_rsize="-1")
|
self.flags(storwize_svc_vol_rsize="-1")
|
||||||
self.assertRaises(exception.InvalidInput,
|
self.assertRaises(exception.InvalidInput,
|
||||||
self.driver._check_flags)
|
self.driver._check_flags)
|
||||||
FLAGS.reset()
|
FLAGS.reset()
|
||||||
|
|
||||||
# Finally, check with good parameters
|
# Finally, check with good parameters
|
||||||
@ -1059,7 +1060,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
|
|||||||
# Test timeout and volume cleanup
|
# Test timeout and volume cleanup
|
||||||
self.flags(storwize_svc_flashcopy_timeout=str(1))
|
self.flags(storwize_svc_flashcopy_timeout=str(1))
|
||||||
self.assertRaises(exception.InvalidSnapshot,
|
self.assertRaises(exception.InvalidSnapshot,
|
||||||
self.driver.create_snapshot, snapshot)
|
self.driver.create_snapshot, snapshot)
|
||||||
is_volume_defined = self.driver._is_volume_defined(snapshot["name"])
|
is_volume_defined = self.driver._is_volume_defined(snapshot["name"])
|
||||||
self.assertEqual(is_volume_defined, False)
|
self.assertEqual(is_volume_defined, False)
|
||||||
FLAGS.reset()
|
FLAGS.reset()
|
||||||
@ -1068,21 +1069,21 @@ class StorwizeSVCDriverTestCase(test.TestCase):
|
|||||||
if self.USESIM == 1:
|
if self.USESIM == 1:
|
||||||
self.sim.error_injection("lsfcmap", "bogus_prepare")
|
self.sim.error_injection("lsfcmap", "bogus_prepare")
|
||||||
self.assertRaises(exception.VolumeBackendAPIException,
|
self.assertRaises(exception.VolumeBackendAPIException,
|
||||||
self.driver.create_snapshot, snapshot)
|
self.driver.create_snapshot, snapshot)
|
||||||
|
|
||||||
# Test prestartfcmap, startfcmap, and rmfcmap failing
|
# Test prestartfcmap, startfcmap, and rmfcmap failing
|
||||||
if self.USESIM == 1:
|
if self.USESIM == 1:
|
||||||
self.sim.error_injection("prestartfcmap", "bad_id")
|
self.sim.error_injection("prestartfcmap", "bad_id")
|
||||||
self.assertRaises(exception.ProcessExecutionError,
|
self.assertRaises(exception.ProcessExecutionError,
|
||||||
self.driver.create_snapshot, snapshot)
|
self.driver.create_snapshot, snapshot)
|
||||||
self.sim.error_injection("lsfcmap", "speed_up")
|
self.sim.error_injection("lsfcmap", "speed_up")
|
||||||
self.sim.error_injection("startfcmap", "bad_id")
|
self.sim.error_injection("startfcmap", "bad_id")
|
||||||
self.assertRaises(exception.ProcessExecutionError,
|
self.assertRaises(exception.ProcessExecutionError,
|
||||||
self.driver.create_snapshot, snapshot)
|
self.driver.create_snapshot, snapshot)
|
||||||
self.sim.error_injection("prestartfcmap", "bad_id")
|
self.sim.error_injection("prestartfcmap", "bad_id")
|
||||||
self.sim.error_injection("rmfcmap", "bad_id")
|
self.sim.error_injection("rmfcmap", "bad_id")
|
||||||
self.assertRaises(exception.ProcessExecutionError,
|
self.assertRaises(exception.ProcessExecutionError,
|
||||||
self.driver.create_snapshot, snapshot)
|
self.driver.create_snapshot, snapshot)
|
||||||
|
|
||||||
# Test successful snapshot
|
# Test successful snapshot
|
||||||
self.driver.create_snapshot(snapshot)
|
self.driver.create_snapshot(snapshot)
|
||||||
@ -1119,7 +1120,9 @@ class StorwizeSVCDriverTestCase(test.TestCase):
|
|||||||
if self.USESIM == 1:
|
if self.USESIM == 1:
|
||||||
self.sim.error_injection("prestartfcmap", "bad_id")
|
self.sim.error_injection("prestartfcmap", "bad_id")
|
||||||
self.assertRaises(exception.ProcessExecutionError,
|
self.assertRaises(exception.ProcessExecutionError,
|
||||||
self.driver.create_volume_from_snapshot, volume2, snapshot)
|
self.driver.create_volume_from_snapshot,
|
||||||
|
volume2,
|
||||||
|
snapshot)
|
||||||
|
|
||||||
# Succeed
|
# Succeed
|
||||||
if self.USESIM == 1:
|
if self.USESIM == 1:
|
||||||
@ -1141,7 +1144,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
|
|||||||
self.driver.create_volume(volume3)
|
self.driver.create_volume(volume3)
|
||||||
snapshot["name"] = volume3["name"]
|
snapshot["name"] = volume3["name"]
|
||||||
self.assertRaises(exception.InvalidSnapshot,
|
self.assertRaises(exception.InvalidSnapshot,
|
||||||
self.driver.create_snapshot, snapshot)
|
self.driver.create_snapshot,
|
||||||
|
snapshot)
|
||||||
self.driver._delete_volume(volume1, True)
|
self.driver._delete_volume(volume1, True)
|
||||||
self.driver._delete_volume(volume3, True)
|
self.driver._delete_volume(volume3, True)
|
||||||
|
|
||||||
@ -1150,7 +1154,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
|
|||||||
snapshot["name"] = "snap_volume%s" % random.randint(10000, 99999)
|
snapshot["name"] = "snap_volume%s" % random.randint(10000, 99999)
|
||||||
snapshot["volume_name"] = "no_exist"
|
snapshot["volume_name"] = "no_exist"
|
||||||
self.assertRaises(exception.VolumeNotFound,
|
self.assertRaises(exception.VolumeNotFound,
|
||||||
self.driver.create_snapshot, snapshot)
|
self.driver.create_snapshot,
|
||||||
|
snapshot)
|
||||||
|
|
||||||
def test_storwize_svc_volumes(self):
|
def test_storwize_svc_volumes(self):
|
||||||
# Create a first volume
|
# Create a first volume
|
||||||
@ -1176,7 +1181,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
|
|||||||
|
|
||||||
# Try to create the volume again (should fail)
|
# Try to create the volume again (should fail)
|
||||||
self.assertRaises(exception.ProcessExecutionError,
|
self.assertRaises(exception.ProcessExecutionError,
|
||||||
self.driver.create_volume, volume)
|
self.driver.create_volume,
|
||||||
|
volume)
|
||||||
|
|
||||||
# Try to delete a volume that doesn't exist (should not fail)
|
# Try to delete a volume that doesn't exist (should not fail)
|
||||||
vol_no_exist = {"name": "i_dont_exist"}
|
vol_no_exist = {"name": "i_dont_exist"}
|
||||||
@ -1270,7 +1276,7 @@ class StorwizeSVCDriverTestCase(test.TestCase):
|
|||||||
if self.USESIM == 1:
|
if self.USESIM == 1:
|
||||||
self.sim.error_injection("mkvdisk", "no_compression")
|
self.sim.error_injection("mkvdisk", "no_compression")
|
||||||
self.assertRaises(exception.ProcessExecutionError,
|
self.assertRaises(exception.ProcessExecutionError,
|
||||||
self._create_test_vol)
|
self._create_test_vol)
|
||||||
FLAGS.reset()
|
FLAGS.reset()
|
||||||
|
|
||||||
def test_storwize_svc_unicode_host_and_volume_names(self):
|
def test_storwize_svc_unicode_host_and_volume_names(self):
|
||||||
@ -1328,7 +1334,8 @@ class StorwizeSVCDriverTestCase(test.TestCase):
|
|||||||
|
|
||||||
# Try to delete the 1st volume (should fail because it is mapped)
|
# Try to delete the 1st volume (should fail because it is mapped)
|
||||||
self.assertRaises(exception.ProcessExecutionError,
|
self.assertRaises(exception.ProcessExecutionError,
|
||||||
self.driver.delete_volume, volume1)
|
self.driver.delete_volume,
|
||||||
|
volume1)
|
||||||
|
|
||||||
# Test no preferred node
|
# Test no preferred node
|
||||||
self.driver.terminate_connection(volume1, conn)
|
self.driver.terminate_connection(volume1, conn)
|
||||||
@ -1346,7 +1353,9 @@ class StorwizeSVCDriverTestCase(test.TestCase):
|
|||||||
# Try to remove connection from host that doesn't exist (should fail)
|
# Try to remove connection from host that doesn't exist (should fail)
|
||||||
conn_no_exist = {"initiator": "i_dont_exist"}
|
conn_no_exist = {"initiator": "i_dont_exist"}
|
||||||
self.assertRaises(exception.VolumeBackendAPIException,
|
self.assertRaises(exception.VolumeBackendAPIException,
|
||||||
self.driver.terminate_connection, volume1, conn_no_exist)
|
self.driver.terminate_connection,
|
||||||
|
volume1,
|
||||||
|
conn_no_exist)
|
||||||
|
|
||||||
# Try to remove connection from volume that isn't mapped (should print
|
# Try to remove connection from volume that isn't mapped (should print
|
||||||
# message but NOT fail)
|
# message but NOT fail)
|
||||||
|
@ -20,7 +20,7 @@ from cinder.tests import utils as test_utils
|
|||||||
|
|
||||||
class TestUtilsTestCase(test.TestCase):
|
class TestUtilsTestCase(test.TestCase):
|
||||||
def test_get_test_admin_context(self):
|
def test_get_test_admin_context(self):
|
||||||
"""get_test_admin_context's return value behaves like admin context"""
|
"""get_test_admin_context's return value behaves like admin context."""
|
||||||
ctxt = test_utils.get_test_admin_context()
|
ctxt = test_utils.get_test_admin_context()
|
||||||
|
|
||||||
# TODO(soren): This should verify the full interface context
|
# TODO(soren): This should verify the full interface context
|
||||||
|
@ -350,8 +350,9 @@ class GenericUtilsTestCase(test.TestCase):
|
|||||||
self.assertEqual(reloaded_data, fake_contents)
|
self.assertEqual(reloaded_data, fake_contents)
|
||||||
self.reload_called = True
|
self.reload_called = True
|
||||||
|
|
||||||
data = utils.read_cached_file("/this/is/a/fake", cache_data,
|
data = utils.read_cached_file("/this/is/a/fake",
|
||||||
reload_func=test_reload)
|
cache_data,
|
||||||
|
reload_func=test_reload)
|
||||||
self.assertEqual(data, fake_contents)
|
self.assertEqual(data, fake_contents)
|
||||||
self.assertTrue(self.reload_called)
|
self.assertTrue(self.reload_called)
|
||||||
|
|
||||||
@ -445,7 +446,8 @@ class MonkeyPatchTestCase(test.TestCase):
|
|||||||
self.flags(
|
self.flags(
|
||||||
monkey_patch=True,
|
monkey_patch=True,
|
||||||
monkey_patch_modules=[self.example_package + 'example_a' + ':'
|
monkey_patch_modules=[self.example_package + 'example_a' + ':'
|
||||||
+ self.example_package + 'example_decorator'])
|
+ self.example_package
|
||||||
|
+ 'example_decorator'])
|
||||||
|
|
||||||
def test_monkey_patch(self):
|
def test_monkey_patch(self):
|
||||||
utils.monkey_patch()
|
utils.monkey_patch()
|
||||||
@ -467,19 +469,19 @@ class MonkeyPatchTestCase(test.TestCase):
|
|||||||
self.assertEqual(ret_b, 8)
|
self.assertEqual(ret_b, 8)
|
||||||
package_a = self.example_package + 'example_a.'
|
package_a = self.example_package + 'example_a.'
|
||||||
self.assertTrue(package_a + 'example_function_a'
|
self.assertTrue(package_a + 'example_function_a'
|
||||||
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
|
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
|
||||||
|
|
||||||
self.assertTrue(package_a + 'ExampleClassA.example_method'
|
self.assertTrue(package_a + 'ExampleClassA.example_method'
|
||||||
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
|
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
|
||||||
self.assertTrue(package_a + 'ExampleClassA.example_method_add'
|
self.assertTrue(package_a + 'ExampleClassA.example_method_add'
|
||||||
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
|
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
|
||||||
package_b = self.example_package + 'example_b.'
|
package_b = self.example_package + 'example_b.'
|
||||||
self.assertFalse(package_b + 'example_function_b'
|
self.assertFalse(package_b + 'example_function_b'
|
||||||
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
|
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
|
||||||
self.assertFalse(package_b + 'ExampleClassB.example_method'
|
self.assertFalse(package_b + 'ExampleClassB.example_method'
|
||||||
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
|
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
|
||||||
self.assertFalse(package_b + 'ExampleClassB.example_method_add'
|
self.assertFalse(package_b + 'ExampleClassB.example_method_add'
|
||||||
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
|
in cinder.tests.monkey_patch_example.CALLED_FUNCTION)
|
||||||
|
|
||||||
|
|
||||||
class AuditPeriodTest(test.TestCase):
|
class AuditPeriodTest(test.TestCase):
|
||||||
@ -501,149 +503,126 @@ class AuditPeriodTest(test.TestCase):
|
|||||||
|
|
||||||
def test_hour(self):
|
def test_hour(self):
|
||||||
begin, end = utils.last_completed_audit_period(unit='hour')
|
begin, end = utils.last_completed_audit_period(unit='hour')
|
||||||
        self.assertEquals(begin,
                          datetime.datetime(hour=7,
                                            day=5,
                                            month=3,
                                            year=2012))
        self.assertEquals(end, datetime.datetime(hour=8,
                                                 day=5,
                                                 month=3,
                                                 year=2012))

    def test_hour_with_offset_before_current(self):
        begin, end = utils.last_completed_audit_period(unit='hour@10')
        self.assertEquals(begin, datetime.datetime(minute=10,
                                                   hour=7,
                                                   day=5,
                                                   month=3,
                                                   year=2012))
        self.assertEquals(end, datetime.datetime(minute=10,
                                                 hour=8,
                                                 day=5,
                                                 month=3,
                                                 year=2012))

    def test_hour_with_offset_after_current(self):
        begin, end = utils.last_completed_audit_period(unit='hour@30')
        self.assertEquals(begin, datetime.datetime(minute=30,
                                                   hour=6,
                                                   day=5,
                                                   month=3,
                                                   year=2012))
        self.assertEquals(end, datetime.datetime(minute=30,
                                                 hour=7,
                                                 day=5,
                                                 month=3,
                                                 year=2012))

    def test_day(self):
        begin, end = utils.last_completed_audit_period(unit='day')
        self.assertEquals(begin, datetime.datetime(day=4,
                                                   month=3,
                                                   year=2012))
        self.assertEquals(end, datetime.datetime(day=5,
                                                 month=3,
                                                 year=2012))

    def test_day_with_offset_before_current(self):
        begin, end = utils.last_completed_audit_period(unit='day@6')
        self.assertEquals(begin, datetime.datetime(hour=6,
                                                   day=4,
                                                   month=3,
                                                   year=2012))
        self.assertEquals(end, datetime.datetime(hour=6,
                                                 day=5,
                                                 month=3,
                                                 year=2012))

    def test_day_with_offset_after_current(self):
        begin, end = utils.last_completed_audit_period(unit='day@10')
        self.assertEquals(begin, datetime.datetime(hour=10,
                                                   day=3,
                                                   month=3,
                                                   year=2012))
        self.assertEquals(end, datetime.datetime(hour=10,
                                                 day=4,
                                                 month=3,
                                                 year=2012))

    def test_month(self):
        begin, end = utils.last_completed_audit_period(unit='month')
        self.assertEquals(begin, datetime.datetime(day=1,
                                                   month=2,
                                                   year=2012))
        self.assertEquals(end, datetime.datetime(day=1,
                                                 month=3,
                                                 year=2012))

    def test_month_with_offset_before_current(self):
        begin, end = utils.last_completed_audit_period(unit='month@2')
        self.assertEquals(begin, datetime.datetime(day=2,
                                                   month=2,
                                                   year=2012))
        self.assertEquals(end, datetime.datetime(day=2,
                                                 month=3,
                                                 year=2012))

    def test_month_with_offset_after_current(self):
        begin, end = utils.last_completed_audit_period(unit='month@15')
        self.assertEquals(begin, datetime.datetime(day=15,
                                                   month=1,
                                                   year=2012))
        self.assertEquals(end, datetime.datetime(day=15,
                                                 month=2,
                                                 year=2012))

    def test_year(self):
        begin, end = utils.last_completed_audit_period(unit='year')
        self.assertEquals(begin, datetime.datetime(day=1,
                                                   month=1,
                                                   year=2011))
        self.assertEquals(end, datetime.datetime(day=1,
                                                 month=1,
                                                 year=2012))

    def test_year_with_offset_before_current(self):
        begin, end = utils.last_completed_audit_period(unit='year@2')
        self.assertEquals(begin, datetime.datetime(day=1,
                                                   month=2,
                                                   year=2011))
        self.assertEquals(end, datetime.datetime(day=1,
                                                 month=2,
                                                 year=2012))

    def test_year_with_offset_after_current(self):
        begin, end = utils.last_completed_audit_period(unit='year@6')
        self.assertEquals(begin, datetime.datetime(day=1,
                                                   month=6,
                                                   year=2010))
        self.assertEquals(end, datetime.datetime(day=1,
                                                 month=6,
                                                 year=2011))


class FakeSSHClient(object):

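The re-wrapped assertions above all follow the same visual-indent rule: when a call spans several lines, each following argument sits directly under the first one. A minimal runnable sketch of that shape, using a made-up helper name rather than anything from the patch:

# Illustration only; 'audit_window' is a hypothetical helper, not part of the patch.
import datetime


def audit_window():
    # Continuation arguments are aligned under the opening parenthesis.
    return datetime.datetime(hour=7,
                             day=5,
                             month=3,
                             year=2012)


print(audit_window())
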
@ -20,40 +20,40 @@ from cinder import version


class VersionTestCase(test.TestCase):
    """Test cases for Versions code."""
    def setUp(self):
        """Setup test with unchanging values."""
        super(VersionTestCase, self).setUp()
        self.version = version
        self.version.FINAL = False
        self.version.CINDER_VERSION = ['2012', '10']
        self.version.YEAR, self.version.COUNT = self.version.CINDER_VERSION
        self.version.version_info = {'branch_nick': u'LOCALBRANCH',
                                     'revision_id': 'LOCALREVISION',
                                     'revno': 0}

    def test_version_string_is_good(self):
        """Ensure version string works."""
        self.assertEqual("2012.10-dev", self.version.version_string())

    def test_canonical_version_string_is_good(self):
        """Ensure canonical version works."""
        self.assertEqual("2012.10", self.version.canonical_version_string())

    def test_final_version_strings_are_identical(self):
        """Ensure final version strings match only at release."""
        self.assertNotEqual(self.version.canonical_version_string(),
                            self.version.version_string())
        self.version.FINAL = True
        self.assertEqual(self.version.canonical_version_string(),
                         self.version.version_string())

    def test_vcs_version_string_is_good(self):
        """Ensure uninstalled code generates local."""
        self.assertEqual("LOCALBRANCH:LOCALREVISION",
                         self.version.vcs_version_string())

    def test_version_string_with_vcs_is_good(self):
        """Ensure uninstalled code get version string."""
        self.assertEqual("2012.10-LOCALBRANCH:LOCALREVISION",
                         self.version.version_string_with_vcs())

@ -201,8 +201,8 @@ class VolumeTestCase(test.TestCase):
        self.volume.create_volume(self.context, volume_id)

        self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
        self.volume.driver.delete_volume(
            mox.IgnoreArg()).AndRaise(exception.VolumeIsBusy)
        self.mox.ReplayAll()
        res = self.volume.delete_volume(self.context, volume_id)
        self.assertEqual(True, res)

@ -226,9 +226,9 @@ class VolumeTestCase(test.TestCase):
                         db.volume_get(
                             context.get_admin_context(),
                             volume_dst['id']).id)
        self.assertEqual(snapshot_id,
                         db.volume_get(context.get_admin_context(),
                                       volume_dst['id']).snapshot_id)

        self.volume.delete_volume(self.context, volume_dst['id'])
        self.volume.delete_snapshot(self.context, snapshot_id)

@ -454,8 +454,8 @@ class VolumeTestCase(test.TestCase):
        self.volume.create_snapshot(self.context, volume_id, snapshot_id)

        self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
        self.volume.driver.delete_snapshot(
            mox.IgnoreArg()).AndRaise(exception.SnapshotIsBusy)
        self.mox.ReplayAll()
        self.volume.delete_snapshot(self.context, snapshot_id)
        snapshot_ref = db.snapshot_get(self.context, snapshot_id)

@ -486,13 +486,14 @@ class VolumeTestCase(test.TestCase):
        image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        volume_id = 1
        # creating volume testdata
        db.volume_create(self.context,
                         {'id': volume_id,
                          'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                          'display_description': 'Test Desc',
                          'size': 20,
                          'status': 'creating',
                          'instance_uuid': None,
                          'host': 'dummy'})
        try:
            self.volume.create_volume(self.context,
                                      volume_id,

@ -526,12 +527,13 @@ class VolumeTestCase(test.TestCase):
        image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
        # creating volume testdata
        volume_id = 1
        db.volume_create(self.context,
                         {'id': volume_id,
                          'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                          'display_description': 'Test Desc',
                          'size': 20,
                          'status': 'creating',
                          'host': 'dummy'})

        self.assertRaises(exception.ImageNotFound,
                          self.volume.create_volume,

@ -557,19 +559,20 @@ class VolumeTestCase(test.TestCase):
        image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
        # creating volume testdata
        volume_id = 1
        db.volume_create(self.context,
                         {'id': volume_id,
                          'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                          'display_description': 'Test Desc',
                          'size': 20,
                          'status': 'uploading',
                          'instance_uuid': None,
                          'host': 'dummy'})

        try:
            # start test
            self.volume.copy_volume_to_image(self.context,
                                             volume_id,
                                             image_id)

            volume = db.volume_get(self.context, volume_id)
            self.assertEqual(volume['status'], 'available')

@ -591,21 +594,21 @@ class VolumeTestCase(test.TestCase):
        image_id = 'a440c04b-79fa-479c-bed1-0b816eaec379'
        # creating volume testdata
        volume_id = 1
        db.volume_create(
            self.context,
            {'id': volume_id,
             'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
             'display_description': 'Test Desc',
             'size': 20,
             'status': 'uploading',
             'instance_uuid': 'b21f957d-a72f-4b93-b5a5-45b1161abb02',
             'host': 'dummy'})

        try:
            # start test
            self.volume.copy_volume_to_image(self.context,
                                             volume_id,
                                             image_id)

            volume = db.volume_get(self.context, volume_id)
            self.assertEqual(volume['status'], 'in-use')

@ -626,12 +629,13 @@ class VolumeTestCase(test.TestCase):
        image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
        # creating volume testdata
        volume_id = 1
        db.volume_create(self.context,
                         {'id': volume_id,
                          'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                          'display_description': 'Test Desc',
                          'size': 20,
                          'status': 'in-use',
                          'host': 'dummy'})

        try:
            # start test

@ -663,7 +667,7 @@ class VolumeTestCase(test.TestCase):
        try:
            volume_id = None
            volume_api = cinder.volume.api.API(
                image_service=_FakeImageService())
            volume = volume_api.create(self.context, 2, 'name', 'description',
                                       image_id=1)
            volume_id = volume['id']

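The volume-creation fixtures above move the literal dict onto its own continuation lines once the call no longer fits on one line. A small self-contained sketch of the same call shape, with a fake in-memory stand-in instead of the real database API:

# Illustration only; 'volume_create' below is a made-up stand-in, not the real db layer.
import datetime

_volumes = []


def volume_create(context, values):
    # Record the call so the example has an observable effect.
    _volumes.append((context, values))


volume_create('ctx',
              {'id': 1,
               'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
               'display_description': 'Test Desc',
               'size': 20,
               'status': 'creating',
               'host': 'dummy'})
print(len(_volumes))
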
@ -50,12 +50,13 @@ class VolumeGlanceMetadataTestCase(test.TestCase):
                                                         'value1')
        vol_metadata = db.volume_glance_metadata_create(ctxt, 2, 'key1',
                                                        'value1')
        vol_metadata = db.volume_glance_metadata_create(ctxt, 2,
                                                        'key2',
                                                        'value2')

        expected_metadata_1 = {'volume_id': '1',
                               'key': 'key1',
                               'value': 'value1'}

        metadata = db.volume_glance_metadata_get(ctxt, 1)
        self.assertEqual(len(metadata), 1)

@ -106,8 +107,8 @@ class VolumeGlanceMetadataTestCase(test.TestCase):
        db.volume_glance_metadata_copy_to_snapshot(ctxt, 100, 1)

        expected_meta = {'snapshot_id': '100',
                         'key': 'key1',
                         'value': 'value1'}

        for meta in db.volume_snapshot_glance_metadata_get(ctxt, 100):
            for (key, value) in expected_meta.items():

@ -109,56 +109,56 @@ class VolumeRpcAPITestCase(test.TestCase):

    def test_create_volume(self):
        self._test_volume_api('create_volume',
                              rpc_method='cast',
                              volume=self.fake_volume,
                              host='fake_host1',
                              snapshot_id='fake_snapshot_id',
                              image_id='fake_image_id')

    def test_delete_volume(self):
        self._test_volume_api('delete_volume',
                              rpc_method='cast',
                              volume=self.fake_volume)

    def test_create_snapshot(self):
        self._test_volume_api('create_snapshot',
                              rpc_method='cast',
                              volume=self.fake_volume,
                              snapshot=self.fake_snapshot)

    def test_delete_snapshot(self):
        self._test_volume_api('delete_snapshot',
                              rpc_method='cast',
                              snapshot=self.fake_snapshot,
                              host='fake_host')

    def test_attach_volume(self):
        self._test_volume_api('attach_volume',
                              rpc_method='call',
                              volume=self.fake_volume,
                              instance_uuid='fake_uuid',
                              mountpoint='fake_mountpoint')

    def test_detach_volume(self):
        self._test_volume_api('detach_volume',
                              rpc_method='call',
                              volume=self.fake_volume)

    def test_copy_volume_to_image(self):
        self._test_volume_api('copy_volume_to_image',
                              rpc_method='cast',
                              volume=self.fake_volume,
                              image_id='fake_image_id')

    def test_initialize_connection(self):
        self._test_volume_api('initialize_connection',
                              rpc_method='call',
                              volume=self.fake_volume,
                              connector='fake_connector')

    def test_terminate_connection(self):
        self._test_volume_api('terminate_connection',
                              rpc_method='call',
                              volume=self.fake_volume,
                              connector='fake_connector',
                              force=False)

@ -33,21 +33,20 @@ LOG = logging.getLogger(__name__)


class VolumeTypeTestCase(test.TestCase):
    """Test cases for volume type code."""
    def setUp(self):
        super(VolumeTypeTestCase, self).setUp()

        self.ctxt = context.get_admin_context()
        self.vol_type1_name = str(int(time.time()))
        self.vol_type1_specs = dict(type="physical drive",
                                    drive_type="SAS",
                                    size="300",
                                    rpm="7200",
                                    visible="True")

    def test_volume_type_create_then_destroy(self):
        """Ensure volume types can be created and deleted."""
        prev_all_vtypes = volume_types.get_all_types(self.ctxt)

        volume_types.create(self.ctxt,

@ -75,14 +74,14 @@ class VolumeTypeTestCase(test.TestCase):
                         'drive type was not deleted')

    def test_get_all_volume_types(self):
        """Ensures that all volume types can be retrieved."""
        session = sql_session.get_session()
        total_volume_types = session.query(models.VolumeTypes).count()
        vol_types = volume_types.get_all_types(self.ctxt)
        self.assertEqual(total_volume_types, len(vol_types))

    def test_get_default_volume_type(self):
        """Ensures default volume type can be retrieved."""
        volume_types.create(self.ctxt,
                            fake_flags.def_vol_type,
                            {})

@ -91,26 +90,26 @@ class VolumeTypeTestCase(test.TestCase):
                         fake_flags.def_vol_type)

    def test_default_volume_type_missing_in_db(self):
        """Ensures proper exception raised if default volume type
        is not in database."""
        session = sql_session.get_session()
        default_vol_type = volume_types.get_default_volume_type()
        self.assertEqual(default_vol_type, {})

    def test_non_existent_vol_type_shouldnt_delete(self):
        """Ensures that volume type creation fails with invalid args."""
        self.assertRaises(exception.VolumeTypeNotFoundByName,
                          volume_types.destroy, self.ctxt, "sfsfsdfdfs")

    def test_repeated_vol_types_shouldnt_raise(self):
        """Ensures that volume duplicates don't raise."""
        new_name = self.vol_type1_name + "dup"
        volume_types.create(self.ctxt, new_name)
        volume_types.destroy(self.ctxt, new_name)
        volume_types.create(self.ctxt, new_name)

    def test_invalid_volume_types_params(self):
        """Ensures that volume type creation fails with invalid args."""
        self.assertRaises(exception.InvalidVolumeType,
                          volume_types.destroy, self.ctxt, None)
        self.assertRaises(exception.InvalidVolumeType,

@ -120,7 +119,7 @@ class VolumeTypeTestCase(test.TestCase):
                          self.ctxt, None)

    def test_volume_type_get_by_id_and_name(self):
        """Ensure volume types get returns same entry."""
        volume_types.create(self.ctxt,
                            self.vol_type1_name,
                            self.vol_type1_specs)

@ -131,7 +130,7 @@ class VolumeTypeTestCase(test.TestCase):
        self.assertEqual(new, new2)

    def test_volume_type_search_by_extra_spec(self):
        """Ensure volume types get by extra spec returns correct type."""
        volume_types.create(self.ctxt, "type1", {"key1": "val1",
                                                 "key2": "val2"})
        volume_types.create(self.ctxt, "type2", {"key2": "val2",

@ -139,29 +138,32 @@ class VolumeTypeTestCase(test.TestCase):
        volume_types.create(self.ctxt, "type3", {"key3": "another_value",
                                                 "key4": "val4"})

        vol_types = volume_types.get_all_types(
            self.ctxt,
            search_opts={'extra_specs': {"key1": "val1"}})
        LOG.info("vol_types: %s" % vol_types)
        self.assertEqual(len(vol_types), 1)
        self.assertTrue("type1" in vol_types.keys())
        self.assertEqual(vol_types['type1']['extra_specs'],
                         {"key1": "val1", "key2": "val2"})

        vol_types = volume_types.get_all_types(
            self.ctxt,
            search_opts={'extra_specs': {"key2": "val2"}})
        LOG.info("vol_types: %s" % vol_types)
        self.assertEqual(len(vol_types), 2)
        self.assertTrue("type1" in vol_types.keys())
        self.assertTrue("type2" in vol_types.keys())

        vol_types = volume_types.get_all_types(
            self.ctxt,
            search_opts={'extra_specs': {"key3": "val3"}})
        LOG.info("vol_types: %s" % vol_types)
        self.assertEqual(len(vol_types), 1)
        self.assertTrue("type2" in vol_types.keys())

    def test_volume_type_search_by_extra_spec_multiple(self):
        """Ensure volume types get by extra spec returns correct type."""
        volume_types.create(self.ctxt, "type1", {"key1": "val1",
                                                 "key2": "val2",
                                                 "key3": "val3"})

@ -171,9 +173,10 @@ class VolumeTypeTestCase(test.TestCase):
                                                 "key3": "val3",
                                                 "key4": "val4"})

        vol_types = volume_types.get_all_types(
            self.ctxt,
            search_opts={'extra_specs': {"key1": "val1",
                                         "key3": "val3"}})
        LOG.info("vol_types: %s" % vol_types)
        self.assertEqual(len(vol_types), 2)
        self.assertTrue("type1" in vol_types.keys())

@ -30,8 +30,8 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase):
        self.context = context.get_admin_context()
        self.vol_type1 = dict(name="TEST: Regular volume test")
        self.vol_type1_specs = dict(vol_extra1="value1",
                                    vol_extra2="value2",
                                    vol_extra3=3)
        self.vol_type1['extra_specs'] = self.vol_type1_specs
        ref = db.volume_type_create(self.context, self.vol_type1)
        self.volume_type1_id = ref.id

@ -53,31 +53,31 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase):
    def test_volume_type_specs_get(self):
        expected_specs = self.vol_type1_specs.copy()
        actual_specs = db.volume_type_extra_specs_get(
            context.get_admin_context(),
            self.volume_type1_id)
        self.assertEquals(expected_specs, actual_specs)

    def test_volume_type_extra_specs_delete(self):
        expected_specs = self.vol_type1_specs.copy()
        del expected_specs['vol_extra2']
        db.volume_type_extra_specs_delete(context.get_admin_context(),
                                          self.volume_type1_id,
                                          'vol_extra2')
        actual_specs = db.volume_type_extra_specs_get(
            context.get_admin_context(),
            self.volume_type1_id)
        self.assertEquals(expected_specs, actual_specs)

    def test_volume_type_extra_specs_update(self):
        expected_specs = self.vol_type1_specs.copy()
        expected_specs['vol_extra3'] = "4"
        db.volume_type_extra_specs_update_or_create(
            context.get_admin_context(),
            self.volume_type1_id,
            dict(vol_extra3=4))
        actual_specs = db.volume_type_extra_specs_get(
            context.get_admin_context(),
            self.volume_type1_id)
        self.assertEquals(expected_specs, actual_specs)

    def test_volume_type_extra_specs_create(self):

@ -85,37 +85,37 @@ class VolumeTypeExtraSpecsTestCase(test.TestCase):
        expected_specs['vol_extra4'] = 'value4'
        expected_specs['vol_extra5'] = 'value5'
        db.volume_type_extra_specs_update_or_create(
            context.get_admin_context(),
            self.volume_type1_id,
            dict(vol_extra4="value4",
                 vol_extra5="value5"))
        actual_specs = db.volume_type_extra_specs_get(
            context.get_admin_context(),
            self.volume_type1_id)
        self.assertEquals(expected_specs, actual_specs)

    def test_volume_type_get_with_extra_specs(self):
        volume_type = db.volume_type_get(
            context.get_admin_context(),
            self.volume_type1_id)
        self.assertEquals(volume_type['extra_specs'],
                          self.vol_type1_specs)

        volume_type = db.volume_type_get(
            context.get_admin_context(),
            self.vol_type2_id)
        self.assertEquals(volume_type['extra_specs'], {})

    def test_volume_type_get_by_name_with_extra_specs(self):
        volume_type = db.volume_type_get_by_name(
            context.get_admin_context(),
            self.vol_type1['name'])
        self.assertEquals(volume_type['extra_specs'],
                          self.vol_type1_specs)

        volume_type = db.volume_type_get_by_name(
            context.get_admin_context(),
            self.vol_type2_noextra['name'])
        self.assertEquals(volume_type['extra_specs'], {})

    def test_volume_type_get_all(self):

@ -52,7 +52,7 @@ class UsageInfoTestCase(test.TestCase):
        super(UsageInfoTestCase, self).tearDown()

    def _create_volume(self, params={}):
        """Create a test volume."""
        vol = {}
        vol['snapshot_id'] = self.snapshot_id
        vol['user_id'] = self.user_id

@ -74,32 +74,34 @@ class TestWindowsDriver(basetestcase.BaseTestCase):

    def tearDown(self):
        try:
            if (self._volume_data_2 and
                    self._wutils.volume_exists(self._volume_data_2['name'])):
                self._wutils.delete_volume(self._volume_data_2['name'])

            if (self._volume_data and
                    self._wutils.volume_exists(
                        self._volume_data['name'])):
                self._wutils.delete_volume(self._volume_data['name'])
            if (self._snapshot_data and
                    self._wutils.snapshot_exists(
                        self._snapshot_data['name'])):
                self._wutils.delete_snapshot(self._snapshot_data['name'])
            if (self._connector_data and
                    self._wutils.initiator_id_exists(
                        "%s%s" % (FLAGS.iscsi_target_prefix,
                                  self._volume_data['name']),
                        self._connector_data['initiator'])):
                target_name = "%s%s" % (FLAGS.iscsi_target_prefix,
                                        self._volume_data['name'])
                initiator_name = self._connector_data['initiator']
                self._wutils.delete_initiator_id(target_name, initiator_name)
            if (self._volume_data and
                    self._wutils.export_exists("%s%s" %
                                               (FLAGS.iscsi_target_prefix,
                                                self._volume_data['name']))):
                self._wutils.delete_export(
                    "%s%s" % (FLAGS.iscsi_target_prefix,
                              self._volume_data['name']))

        finally:
            super(TestWindowsDriver, self).tearDown()

@ -178,9 +180,10 @@ class TestWindowsDriver(basetestcase.BaseTestCase):
        retval = self._drv.create_export({}, self._volume_data)

        volume_name = self._volume_data['name']
        self.assertEquals(
            retval,
            {'provider_location': "%s%s" % (FLAGS.iscsi_target_prefix,
                                            volume_name)})

    def test_initialize_connection(self):
        #Create a volume

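The teardown logic above swaps trailing-backslash continuations for conditions wrapped in parentheses, with the continuation indented past the `if`. A minimal runnable sketch of the same pattern, with names invented for the example:

# Illustration only; 'volume_exists' and 'volume_data' are made up, not from the patch.
def volume_exists(name):
    # Stand-in for a driver utility call.
    return bool(name)


volume_data = {'name': 'vol-1'}

# The backslash continuation
#     if volume_data and \
#             volume_exists(volume_data['name']):
# becomes a parenthesized condition:
if (volume_data and
        volume_exists(volume_data['name'])):
    print('volume present')
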
@ -83,9 +83,7 @@ class DriverTestCase(unittest.TestCase):
            size=1, display_name='name', display_description='desc'))
        mock.VerifyAll()

        self.assertEquals(dict(provider_location='sr_uuid/vdi_uuid'), result)

    def test_delete_volume(self):
        mock = mox.Mox()

@ -29,15 +29,11 @@ from cinder.volume.drivers import xiv
FLAGS = flags.FLAGS

FAKE = "fake"
VOLUME = {'size': 16,
          'name': FAKE,
          'id': 1}

CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", }


class XIVFakeProxyDriver(object):

@ -82,21 +78,18 @@ class XIVFakeProxyDriver(object):

        self.volumes[volume['name']]['attached'] = connector

        return {'driver_volume_type': 'iscsi',
                'data': {'target_discovered': True,
                         'target_discovered': True,
                         'target_portal': self.xiv_portal,
                         'target_iqn': self.xiv_iqn,
                         'target_lun': lun_id,
                         'volume_id': volume['id'],
                         'multipath': True,
                         'provider_location': "%s,1 %s %s" % (
                             self.xiv_portal,
                             self.xiv_iqn,
                             lun_id), },
                }

    def terminate_connection(self, volume, connector):

@ -110,8 +103,8 @@ class XIVFakeProxyDriver(object):
        if not self.volume_exists(volume):
            raise self.exception.VolumeNotFound()

        return (self.volumes[volume['name']].get('attached', None)
                == connector)


class XIVVolumeDriverTest(test.TestCase):

@ -126,18 +119,14 @@ class XIVVolumeDriverTest(test.TestCase):
    def test_initialized_should_set_xiv_info(self):
        """Test that the san flags are passed to the XIV proxy."""

        self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_user'],
                          FLAGS.san_login)
        self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_pass'],
                          FLAGS.san_password)
        self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_address'],
                          FLAGS.san_ip)
        self.assertEquals(self.driver.xiv_proxy.xiv_info['xiv_vol_pool'],
                          FLAGS.san_clustername)

    def test_setup_should_fail_if_credentials_are_invalid(self):
        """Test that the xiv_proxy validates credentials."""

@ -186,8 +175,10 @@ class XIVVolumeDriverTest(test.TestCase):

        self.driver.do_setup(None)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.create_volume,
                          {'name': FAKE,
                           'id': 1,
                           'size': 12000})

    def test_initialize_connection(self):
        """Test that inititialize connection attaches volume to host."""

@ -197,7 +188,7 @@ class XIVVolumeDriverTest(test.TestCase):
        self.driver.initialize_connection(VOLUME, CONNECTOR)

        self.assertTrue(
            self.driver.xiv_proxy.is_volume_attached(VOLUME, CONNECTOR))

        self.driver.terminate_connection(VOLUME, CONNECTOR)
        self.driver.delete_volume(VOLUME)

@ -207,7 +198,9 @@ class XIVVolumeDriverTest(test.TestCase):

        self.driver.do_setup(None)
        self.assertRaises(exception.VolumeNotFound,
                          self.driver.initialize_connection,
                          VOLUME,
                          CONNECTOR)

    def test_terminate_connection(self):
        """Test terminating a connection."""

@ -217,10 +210,8 @@ class XIVVolumeDriverTest(test.TestCase):
        self.driver.initialize_connection(VOLUME, CONNECTOR)
        self.driver.terminate_connection(VOLUME, CONNECTOR)

        self.assertFalse(self.driver.xiv_proxy.is_volume_attached(VOLUME,
                                                                  CONNECTOR))

        self.driver.delete_volume(VOLUME)

@ -229,7 +220,9 @@ class XIVVolumeDriverTest(test.TestCase):

        self.driver.do_setup(None)
        self.assertRaises(exception.VolumeNotFound,
                          self.driver.terminate_connection,
                          VOLUME,
                          CONNECTOR)

    def test_terminate_connection_should_fail_on_non_attached_volume(self):
        """Test that terminate won't work for volumes that are not attached."""

@ -238,6 +231,8 @@ class XIVVolumeDriverTest(test.TestCase):
        self.driver.create_volume(VOLUME)

        self.assertRaises(exception.VolumeNotFoundForInstance,
                          self.driver.terminate_connection,
                          VOLUME,
                          CONNECTOR)

        self.driver.delete_volume(VOLUME)

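The fake proxy driver above shows the other recurring rewrite: a two-line expression is enclosed in parentheses instead of being continued with a backslash. A tiny runnable sketch with invented data:

# Illustration only; the data below is made up for the sketch.
volumes = {'vol-1': {'attached': 'host-a'}}
connector = 'host-a'

# A two-line comparison wrapped in parentheses rather than a trailing backslash.
result = (volumes['vol-1'].get('attached', None)
          == connector)
print(result)
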
@ -105,7 +105,7 @@ class FakeRequest(object):
                       ('/api/vcontrollers.xml', self._list_controllers),
                       ('/api/servers.xml', self._list_servers),
                       ('/api/volumes/*/servers.xml',
                        self._list_vol_attachments)]
               }

        ops_list = ops[self.method]

@ -139,8 +139,8 @@ class FakeRequest(object):

    def _login(self):
        params = self._get_parameters(self.body)
        if (params['user'] == RUNTIME_VARS['user'] and
                params['password'] == RUNTIME_VARS['password']):
            return RUNTIME_VARS['login'] % RUNTIME_VARS['access_key']
        else:
            return RUNTIME_VARS['bad_login']

@ -246,8 +246,10 @@ class FakeRequest(object):
                    <created-at type='datetime'>2012-01-28...</created-at>
                    <modified-at type='datetime'>2012-01-28...</modified-at>
                </volume>"""
        return self._generate_list_resp(header,
                                        footer,
                                        body,
                                        RUNTIME_VARS['volumes'])

    def _list_controllers(self):
        header = """<show-vcontrollers-response>

@ -267,8 +269,10 @@ class FakeRequest(object):
                    <chap-username>test_chap_user</chap-username>
                    <chap-target-secret>test_chap_secret</chap-target-secret>
                </vcontroller>"""
        return self._generate_list_resp(header,
                                        footer,
                                        body,
                                        RUNTIME_VARS['controllers'])

    def _list_servers(self):
        header = """<show-servers-response>

@ -317,7 +321,8 @@ class FakeRequest(object):
        for server in attachments:
            srv_params = self._get_server_obj(server)
            resp += body % (server,
                            srv_params['display_name'],
                            srv_params['iqn'])
        resp += footer
        return resp

@ -353,7 +358,7 @@ class FakeHTTPSConnection(FakeHTTPConnection):


class ZadaraVPSADriverTestCase(test.TestCase):
    """Test case for Zadara VPSA volume driver."""

    def setUp(self):
        LOG.debug('Enter: setUp')

@ -428,7 +433,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
        self.driver.check_for_setup_error()

    def test_volume_attach_detach(self):
        """Test volume attachment and detach."""
        volume = {'name': 'test_volume_01', 'size': 1, 'id': 123}
        connector = dict(initiator='test_iqn.1')

@ -450,7 +455,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
        self.driver.delete_volume(volume)

    def test_volume_attach_multiple_detach(self):
        """Test multiple volume attachment and detach."""
        volume = {'name': 'test_volume_01', 'size': 1, 'id': 123}
        connector1 = dict(initiator='test_iqn.1')
        connector2 = dict(initiator='test_iqn.2')

@ -467,7 +472,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
        self.driver.delete_volume(volume)

    def test_wrong_attach_params(self):
        """Test different wrong attach scenarios."""
        volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
        volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102}
        volume3 = {'name': 'test_volume_03', 'size': 1, 'id': 103}

@ -480,7 +485,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
                          volume1, connector1)

    def test_wrong_detach_params(self):
        """Test different wrong detachment scenarios."""

        volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
        volume2 = {'name': 'test_volume_02', 'size': 1, 'id': 102}

@ -505,7 +510,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
                          volume1, connector2)

    def test_wrong_login_reply(self):
        """Test wrong login reply."""

        RUNTIME_VARS['login'] = """<hash>
                    <access-key>%s</access-key>

@ -530,13 +535,13 @@ class ZadaraVPSADriverTestCase(test.TestCase):
                          self.driver.do_setup, None)

    def test_ssl_use(self):
        """Coverage test for SSL connection."""
        self.flags(zadara_vpsa_use_ssl=True)
        self.driver.do_setup(None)
        self.flags(zadara_vpsa_use_ssl=False)

    def test_bad_http_response(self):
        """Coverage test for non-good HTTP response."""
        RUNTIME_VARS['status'] = 400

        volume = {'name': 'test_volume_01', 'size': 1}

@ -544,7 +549,7 @@ class ZadaraVPSADriverTestCase(test.TestCase):
                          self.driver.create_volume, volume)

    def test_delete_without_detach(self):
        """Test volume deletion without detach."""

        volume1 = {'name': 'test_volume_01', 'size': 1, 'id': 101}
        connector1 = dict(initiator='test_iqn.1')

@ -43,9 +43,9 @@ class BaseTestCase(cinder.test.TestCase):
|
|||||||
super(BaseTestCase, self).tearDown()
|
super(BaseTestCase, self).tearDown()
|
||||||
|
|
||||||
has_errors = len([test for (test, msgs) in self._currentResult.errors
|
has_errors = len([test for (test, msgs) in self._currentResult.errors
|
||||||
if test.id() == self.id()]) > 0
|
if test.id() == self.id()]) > 0
|
||||||
failed = len([test for (test, msgs) in self._currentResult.failures
|
failed = len([test for (test, msgs) in self._currentResult.failures
|
||||||
if test.id() == self.id()]) > 0
|
if test.id() == self.id()]) > 0
|
||||||
|
|
||||||
if not has_errors and not failed:
|
if not has_errors and not failed:
|
||||||
self._save_mock_proxies()
|
self._save_mock_proxies()
|
||||||
@ -61,7 +61,7 @@ class BaseTestCase(cinder.test.TestCase):
|
|||||||
test_name = test_name[len(prefix):]
|
test_name = test_name[len(prefix):]
|
||||||
file_name = '{0}_{1}.p.gz'.format(test_name, mock_name)
|
file_name = '{0}_{1}.p.gz'.format(test_name, mock_name)
|
||||||
return os.path.join(os.path.dirname(mockproxy.__file__),
|
return os.path.join(os.path.dirname(mockproxy.__file__),
|
||||||
"stubs", file_name)
|
"stubs", file_name)
|
||||||
|
|
||||||
def _load_mock(self, name):
|
def _load_mock(self, name):
|
||||||
path = self._get_stub_file_path(self.id(), name)
|
path = self._get_stub_file_path(self.id(), name)
|
||||||
@ -72,9 +72,9 @@ class BaseTestCase(cinder.test.TestCase):

def _load_mock_or_create_proxy(self, module_name):
m = None
- if not gen_test_mocks_key in os.environ or \
- os.environ[gen_test_mocks_key].lower() \
- not in ['true', 'yes', '1']:
+ if (not gen_test_mocks_key in os.environ or
+ os.environ[gen_test_mocks_key].lower()
+ not in ['true', 'yes', '1']):
m = self._load_mock(module_name)
else:
module = __import__(module_name)
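This hunk swaps backslash line continuations for a single parenthesized condition, the form PEP 8 recommends for wrapping long boolean expressions. A runnable sketch under assumed values (the environment-variable key is hypothetical; only the variable name and the shape of the condition come from the hunk):

    import os

    gen_test_mocks_key = 'GENERATE_TEST_MOCKS'  # hypothetical key, for illustration only
    # Condition mirrors the hunk verbatim, wrapped in parentheses instead of backslashes.
    if (not gen_test_mocks_key in os.environ or
            os.environ[gen_test_mocks_key].lower()
            not in ['true', 'yes', '1']):
        print('loading recorded mocks')
    else:
        print('generating new mocks')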
@ -20,23 +20,17 @@ Stubouts, mocks and fixtures for windows volume test suite


def get_fake_volume_info(name):
- return {
- 'name': name,
- 'size': 1,
- 'provider_location': 'iqn.2010-10.org.openstack:' + name,
- 'id': 1,
- 'provider_auth': None
- }
+ return {'name': name,
+ 'size': 1,
+ 'provider_location': 'iqn.2010-10.org.openstack:' + name,
+ 'id': 1,
+ 'provider_auth': None}


def get_fake_snapshot_info(volume_name, snapshot_name):
- return {
- 'name': snapshot_name,
- 'volume_name': volume_name,
- }
+ return {'name': snapshot_name,
+ 'volume_name': volume_name, }


def get_fake_connector_info(initiator):
- return {
- 'initiator': initiator,
- }
+ return {'initiator': initiator, }
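The fake-info helpers above are reflowed so the dictionary entries start on the `return {` line and the continuation lines align under the first entry. A small runnable sketch of the resulting shape (keys and values copied from the hunk; the function name is suffixed to mark it as an example):

    def get_fake_volume_info_example(name):
        # Entries aligned with the first key after the opening brace (visual indent).
        return {'name': name,
                'size': 1,
                'provider_location': 'iqn.2010-10.org.openstack:' + name,
                'id': 1,
                'provider_auth': None}

    print(get_fake_volume_info_example('test_volume_01'))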
@ -44,7 +44,7 @@ def serialize_obj(obj):


def serialize_args(*args, **kwargs):
- """Workaround for float string conversion issues in Python 2.6"""
+ """Workaround for float string conversion issues in Python 2.6."""
return serialize_obj((args, kwargs))

@ -113,8 +113,10 @@ class MockProxy(object):
self._recorded_values = {}

def _get_proxy_object(self, obj):
- if hasattr(obj, '__dict__') or isinstance(obj, tuple) or \
- isinstance(obj, list) or isinstance(obj, dict):
+ if (hasattr(obj, '__dict__') or
+ isinstance(obj, tuple) or
+ isinstance(obj, list) or
+ isinstance(obj, dict)):
p = MockProxy(obj)
else:
p = obj
@ -125,8 +127,9 @@ class MockProxy(object):
return object.__getattribute__(self, name)
else:
attr = getattr(self._wrapped, name)
- if inspect.isfunction(attr) or inspect.ismethod(attr) or \
- inspect.isbuiltin(attr):
+ if (inspect.isfunction(attr) or
+ inspect.ismethod(attr) or
+ inspect.isbuiltin(attr)):
def newfunc(*args, **kwargs):
result = attr(*args, **kwargs)
p = self._get_proxy_object(result)
@ -134,8 +137,9 @@ class MockProxy(object):
self._add_recorded_ret_value(name, params, p)
return p
return newfunc
- elif hasattr(attr, '__dict__') or (hasattr(attr, '__getitem__')
- and not (isinstance(attr, str) or isinstance(attr, unicode))):
+ elif (hasattr(attr, '__dict__') or
+ (hasattr(attr, '__getitem__') and not
+ (isinstance(attr, str) or isinstance(attr, unicode)))):
p = MockProxy(attr)
else:
p = attr
@ -48,13 +48,13 @@ class WindowsUtils(object):
return self.__conn_wmi

def find_vhd_by_name(self, name):
- ''' Finds a volume by its name.'''
+ '''Finds a volume by its name.'''

wt_disks = self._conn_wmi.WT_Disk(Description=name)
return wt_disks

def volume_exists(self, name):
- ''' Checks if a volume exists.'''
+ '''Checks if a volume exists.'''

wt_disks = self.find_vhd_by_name(name)
if len(wt_disks) > 0:
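The windows_utils hunks repeat one pattern: the stray space after the opening triple quotes is dropped and the summary line gains a terminating period. A hedged, stand-alone sketch (the class name and return value are illustrative only):

    class WindowsUtilsStyleExample(object):
        def volume_exists(self, name):
            '''Checks if a volume exists.'''
            # No leading space inside the quotes; the sentence ends with a period.
            return bool(name)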
@ -62,7 +62,7 @@ class WindowsUtils(object):
return False

def snapshot_exists(self, name):
- ''' Checks if a snapshot exists.'''
+ '''Checks if a snapshot exists.'''

wt_snapshots = self.find_snapshot_by_name(name)
if len(wt_snapshots) > 0:
@ -70,47 +70,47 @@ class WindowsUtils(object):
return False

def find_snapshot_by_name(self, name):
- ''' Finds a snapshot by its name.'''
+ '''Finds a snapshot by its name.'''

wt_snapshots = self._conn_wmi.WT_Snapshot(Description=name)
return wt_snapshots

def delete_volume(self, name):
- ''' Deletes a volume.'''
+ '''Deletes a volume.'''

wt_disk = self._conn_wmi.WT_Disk(Description=name)[0]
wt_disk.Delete_()
vhdfiles = self._conn_cimv2.query(
"Select * from CIM_DataFile where Name = '" +
self._get_vhd_path(name) + "'")
if len(vhdfiles) > 0:
vhdfiles[0].Delete()

def _get_vhd_path(self, volume_name):
- ''' Gets the path disk of the volume'''
+ '''Gets the path disk of the volume.'''

base_vhd_folder = FLAGS.windows_iscsi_lun_path
return os.path.join(base_vhd_folder, volume_name + ".vhd")

def delete_snapshot(self, name):
- ''' Deletes a snapshot.'''
+ '''Deletes a snapshot.'''

wt_snapshot = self._conn_wmi.WT_Snapshot(Description=name)[0]
wt_snapshot.Delete_()
vhdfile = self._conn_cimv2.query(
"Select * from CIM_DataFile where Name = '" +
self._get_vhd_path(name) + "'")[0]
vhdfile.Delete()

def find_initiator_ids(self, target_name, initiator_name):
- ''' Finds a initiator id by its name.'''
+ '''Finds a initiator id by its name.'''
wt_idmethod = self._conn_wmi.WT_IDMethod(HostName=target_name,
Method=4,
Value=initiator_name)
return wt_idmethod

def initiator_id_exists(self, target_name, initiator_name):
- ''' Checks if a initiatorId exists.'''
+ '''Checks if a initiatorId exists.'''

wt_idmethod = self.find_initiator_ids(target_name, initiator_name)
if len(wt_idmethod) > 0:
@ -118,13 +118,13 @@ class WindowsUtils(object):
return False

def find_exports(self, target_name):
- ''' Finds a export id by its name.'''
+ '''Finds a export id by its name.'''

wt_host = self._conn_wmi.WT_Host(HostName=target_name)
return wt_host

def export_exists(self, target_name):
- ''' Checks if a export exists.'''
+ '''Checks if a export exists.'''

wt_host = self.find_exports(target_name)
if len(wt_host) > 0:
@ -132,13 +132,13 @@ class WindowsUtils(object):
return False

def delete_initiator_id(self, target_name, initiator_name):
- ''' Deletes a initiatorId.'''
+ '''Deletes a initiatorId.'''

wt_init_id = self.find_initiator_ids(target_name, initiator_name)[0]
wt_init_id.Delete_()

def delete_export(self, target_name):
- ''' Deletes an export.'''
+ '''Deletes an export.'''

wt_host = self.find_exports(target_name)[0]
wt_host.RemoveAllWTDisks()
Some files were not shown because too many files have changed in this diff.