Re-enable -1 child limits for nested quotas
Add back support for -1 limits of child projects. Supporting -1 child
limits requires the following changes:

* Continue quota validation up the hierarchy if the current limit is -1
  until we hit a hard limit or run out of parents, and update any
  relevant parents' allocated value along the way
* When updating limits, take special care when setting a child limit to
  -1, or when changing a limit away from -1
* Enable support for creating reservations against "allocated" values to
  support the scenario that:
  - a volume is created on a project with a limit of -1
  - the parent's allocated value has been updated appropriately
  - the volume create fails and the child's in_use quota rolls back
  - now we must also roll back the parent's allocated value

NOTE: There is a race condition between validating the NestedQuotas and
the moment the driver is switched into use; if -1 quotas are in play,
that validation can be out of date. Better support for switching on
NestedQuotas in a live deployment with -1 limits is still to be looked
into, and would likely leverage the "allocated" reservation system.

Closes-Bug: #1548645
Closes-Bug: #1544774
Closes-Bug: #1537189
Change-Id: I2d1dba87baf3595cc8f48574e0281ac17509fe7d
parent d3fd2828f6
commit c02336e4dd
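To make the rollback scenario in the message concrete, here is a minimal,
self-contained sketch (hypothetical project names and helpers, not Cinder
code) of an "allocated reservation": a volume created in a -1 child charges
the first ancestor with a hard limit, and rolling the create back must also
undo that ancestor's allocated value.

    # Toy model: A is the root with a hard limit, B is its child with -1.
    quotas = {
        'A': {'limit': 10, 'allocated': 0, 'in_use': 0},
        'B': {'limit': -1, 'allocated': 0, 'in_use': 0},
    }
    parents = {'B': 'A', 'A': None}


    def reserve(project, delta):
        """Charge delta to project; while limits are -1, push the charge up
        to ancestors' "allocated" values until a hard limit is reached."""
        reservations = [(project, 'in_use', delta)]
        quotas[project]['in_use'] += delta
        cur = parents[project]
        hard_limit_found = quotas[project]['limit'] != -1
        while cur is not None and not hard_limit_found:
            quotas[cur]['allocated'] += delta
            reservations.append((cur, 'allocated', delta))  # remember for rollback
            hard_limit_found = quotas[cur]['limit'] != -1
            cur = parents[cur]
        return reservations


    def rollback(reservations):
        for proj, field, delta in reservations:
            quotas[proj][field] -= delta


    res = reserve('B', 1)                     # volume create on B (limit -1)
    assert quotas['A']['allocated'] == 1      # parent was charged
    rollback(res)                             # the create failed
    assert quotas['A']['allocated'] == 0      # parent charge undone too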
@@ -61,46 +61,18 @@ class QuotaSetsController(wsgi.Controller):
         return dict(quota_set=quota_set)
 
     def _validate_existing_resource(self, key, value, quota_values):
-        if key == 'per_volume_gigabytes':
+        # -1 limit will always be greater than the existing value
+        if key == 'per_volume_gigabytes' or value == -1:
             return
         v = quota_values.get(key, {})
-        if value < (v.get('in_use', 0) + v.get('reserved', 0)):
+        used = (v.get('in_use', 0) + v.get('reserved', 0))
+        if QUOTAS.using_nested_quotas():
+            used += v.get('allocated', 0)
+        if value < used:
             msg = _("Quota %s limit must be equal or greater than existing "
                     "resources.") % key
             raise webob.exc.HTTPBadRequest(explanation=msg)
 
-    def _validate_quota_limit(self, quota, key, project_quotas=None,
-                              parent_project_quotas=None):
-        limit = self.validate_integer(quota[key], key, min_value=-1,
-                                      max_value=db.MAX_INT)
-
-        # If a parent quota is unlimited (-1) no validation needs to happen
-        # for the amount of existing free quota
-        # TODO(mc_nair): will need to recurse up for nested quotas once
-        # -1 child project values are enabled
-        if parent_project_quotas and parent_project_quotas[key]['limit'] != -1:
-            free_quota = (parent_project_quotas[key]['limit'] -
-                          parent_project_quotas[key]['in_use'] -
-                          parent_project_quotas[key]['reserved'] -
-                          parent_project_quotas[key].get('allocated', 0))
-
-            current = 0
-            if project_quotas.get(key):
-                current = project_quotas[key]['limit']
-                # -1 limit doesn't change free quota available in parent
-                if current == -1:
-                    current = 0
-
-            # Add back the existing quota limit (if any is set) from the
-            # current free quota since it will be getting reset and is part
-            # of the parent's allocated value
-            free_quota += current
-
-            if limit > free_quota:
-                msg = _("Free quota available is %s.") % free_quota
-                raise webob.exc.HTTPBadRequest(explanation=msg)
-        return limit
-
     def _get_quotas(self, context, id, usages=False):
         values = QUOTAS.get_project_quotas(context, id, usages=usages)
 
@@ -272,7 +244,7 @@ class QuotaSetsController(wsgi.Controller):
         # Get the parent_id of the target project to verify whether we are
         # dealing with hierarchical namespace or non-hierarchical namespace
         target_project = quota_utils.get_project_hierarchy(
-            context, target_project_id)
+            context, target_project_id, parents_as_ids=True)
         parent_id = target_project.parent_id
 
         if parent_id:
@@ -283,8 +255,6 @@ class QuotaSetsController(wsgi.Controller):
             self._authorize_update_or_delete(context_project,
                                              target_project.id,
                                              parent_id)
-            parent_project_quotas = QUOTAS.get_project_quotas(
-                context, parent_id)
 
         # NOTE(ankit): Pass #2 - In this loop for body['quota_set'].keys(),
         # we validate the quota limits to ensure that we can bail out if
@@ -294,34 +264,29 @@ class QuotaSetsController(wsgi.Controller):
         quota_values = QUOTAS.get_project_quotas(context, target_project_id,
                                                  defaults=False)
         valid_quotas = {}
-        allocated_quotas = {}
+        reservations = []
        for key in body['quota_set'].keys():
             if key in NON_QUOTA_KEYS:
                 continue
 
-            if not skip_flag:
+            value = self.validate_integer(
+                body['quota_set'][key], key, min_value=-1,
+                max_value=db.MAX_INT)
+
+            # Can't skip the validation of nested quotas since it could mess up
+            # hierarchy if parent limit is less than childrens' current usage
+            if not skip_flag or use_nested_quotas:
                 self._validate_existing_resource(key, value, quota_values)
 
-            if use_nested_quotas and parent_id:
-                value = self._validate_quota_limit(body['quota_set'], key,
-                                                   quota_values,
-                                                   parent_project_quotas)
-
-                if value < 0:
-                    # TODO(mc_nair): extend to handle -1 limits and recurse up
-                    # the hierarchy
-                    msg = _("Quota can't be set to -1 for child projects.")
-                    raise webob.exc.HTTPBadRequest(explanation=msg)
-
-                original_quota = 0
-                if quota_values.get(key):
-                    original_quota = quota_values[key]['limit']
-
-                allocated_quotas[key] = (
-                    parent_project_quotas[key].get('allocated', 0) + value -
-                    original_quota)
-            else:
-                value = self._validate_quota_limit(body['quota_set'], key)
+            if use_nested_quotas:
+                try:
+                    reservations += self._update_nested_quota_allocated(
+                        context, target_project, quota_values, key, value)
+                except exception.OverQuota as e:
+                    if reservations:
+                        db.reservation_rollback(context, reservations)
+                    raise webob.exc.HTTPBadRequest(explanation=e.message)
+
             valid_quotas[key] = value
 
         # NOTE(ankit): Pass #3 - At this point we know that all the keys and
@@ -335,21 +300,41 @@ class QuotaSetsController(wsgi.Controller):
                     db.quota_create(context, target_project_id, key, value)
                 except exception.AdminRequired:
                     raise webob.exc.HTTPForbidden()
-            # If hierarchical projects, update child's quota first
-            # and then parents quota. In future this needs to be an
-            # atomic operation.
-            if use_nested_quotas and parent_id:
-                if key in allocated_quotas.keys():
-                    try:
-                        db.quota_allocated_update(context, parent_id, key,
-                                                  allocated_quotas[key])
-                    except exception.ProjectQuotaNotFound:
-                        parent_limit = parent_project_quotas[key]['limit']
-                        db.quota_create(context, parent_id, key, parent_limit,
-                                        allocated=allocated_quotas[key])
+
+        if reservations:
+            db.reservation_commit(context, reservations)
 
         return {'quota_set': self._get_quotas(context, target_project_id)}
 
+    def _get_quota_usage(self, quota_obj):
+        return (quota_obj.get('in_use', 0) + quota_obj.get('allocated', 0) +
+                quota_obj.get('reserved', 0))
+
+    def _update_nested_quota_allocated(self, ctxt, target_project,
+                                       target_project_quotas, res, new_limit):
+        reservations = []
+        # per_volume_gigabytes doesn't make sense to nest
+        if res == "per_volume_gigabytes":
+            return reservations
+
+        quota_for_res = target_project_quotas.get(res, {})
+        orig_quota_from_target_proj = quota_for_res.get('limit', 0)
+        # If limit was -1, we were "taking" current child's usage from parent
+        if orig_quota_from_target_proj == -1:
+            orig_quota_from_target_proj = self._get_quota_usage(quota_for_res)
+
+        new_quota_from_target_proj = new_limit
+        # If we set limit to -1, we will "take" the current usage from parent
+        if new_limit == -1:
+            new_quota_from_target_proj = self._get_quota_usage(quota_for_res)
+
+        res_change = new_quota_from_target_proj - orig_quota_from_target_proj
+        if res_change != 0:
+            deltas = {res: res_change}
+            reservations += quota_utils.update_alloc_to_next_hard_limit(
+                ctxt, QUOTAS.resources, deltas, res, None, target_project.id)
+
+        return reservations
+
     @wsgi.serializers(xml=QuotaTemplate)
     def defaults(self, req, id):
         context = req.environ['cinder.context']
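A worked example of the res_change computation in
_update_nested_quota_allocated above (standalone sketch; the dict layout
mirrors the quota values the controller passes in, but the numbers are
made up): the charge a child places on its parent is its hard limit, or its
current usage when the limit is -1.

    def quota_usage(q):
        return q.get('in_use', 0) + q.get('allocated', 0) + q.get('reserved', 0)


    def res_change(child_quota, new_limit):
        old_limit = child_quota.get('limit', 0)
        old_charge = quota_usage(child_quota) if old_limit == -1 else old_limit
        new_charge = quota_usage(child_quota) if new_limit == -1 else new_limit
        return new_charge - old_charge


    child = {'limit': -1, 'in_use': 3, 'reserved': 1, 'allocated': 0}
    # Moving from -1 (current usage 4) to a hard limit of 10 means the parent's
    # allocated value must grow by 6.
    assert res_change(child, 10) == 6
    # Staying at -1 changes nothing in the parent.
    assert res_change(child, -1) == 0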
@@ -396,9 +381,9 @@ class QuotaSetsController(wsgi.Controller):
             # If the project which is being deleted has allocated part of its
             # quota to its subprojects, then subprojects' quotas should be
             # deleted first.
-            for key, value in project_quotas.items():
-                if 'allocated' in project_quotas[key].keys():
-                    if project_quotas[key]['allocated'] != 0:
+            for res, value in project_quotas.items():
+                if 'allocated' in project_quotas[res].keys():
+                    if project_quotas[res]['allocated'] != 0:
                         msg = _("About to delete child projects having "
                                 "non-zero quota. This should not be performed")
                         raise webob.exc.HTTPBadRequest(explanation=msg)
@@ -411,25 +396,17 @@ class QuotaSetsController(wsgi.Controller):
             self._authorize_update_or_delete(context_project,
                                              target_project.id,
                                              parent_id)
-            parent_project_quotas = QUOTAS.get_project_quotas(
-                ctxt, parent_id)
 
-            # Delete child quota first and later update parent's quota.
             try:
                 db.quota_destroy_by_project(ctxt, target_project.id)
             except exception.AdminRequired:
                 raise webob.exc.HTTPForbidden()
 
-            # The parent "gives" quota to its child using the "allocated" value
-            # and since the child project is getting deleted, we should restore
-            # the child projects quota to the parent quota, but lowering it's
-            # allocated value
-            for key, value in project_quotas.items():
-                project_hard_limit = project_quotas[key]['limit']
-                parent_allocated = parent_project_quotas[key]['allocated']
-                parent_allocated -= project_hard_limit
-                db.quota_allocated_update(ctxt, parent_id, key,
-                                          parent_allocated)
+            for res, limit in project_quotas.items():
+                # Update child limit to 0 so the parent hierarchy gets it's
+                # allocated values updated properly
+                self._update_nested_quota_allocated(
+                    ctxt, target_project, project_quotas, res, 0)
 
     def validate_setup_for_nested_quota_use(self, req):
         """Validates that the setup supports using nested quotas.
@@ -836,10 +836,12 @@ def quota_usage_get_all_by_project(context, project_id):
 
 
 def quota_reserve(context, resources, quotas, deltas, expire,
-                  until_refresh, max_age, project_id=None):
+                  until_refresh, max_age, project_id=None,
+                  is_allocated_reserve=False):
     """Check quotas and create appropriate reservations."""
     return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
-                              until_refresh, max_age, project_id=project_id)
+                              until_refresh, max_age, project_id=project_id,
+                              is_allocated_reserve=is_allocated_reserve)
 
 
 def reservation_commit(context, reservations, project_id=None):
@@ -783,14 +783,16 @@ def _quota_usage_create(context, project_id, resource, in_use, reserved,
 
 
 def _reservation_create(context, uuid, usage, project_id, resource, delta,
-                        expire, session=None):
+                        expire, session=None, allocated_id=None):
+    usage_id = usage['id'] if usage else None
     reservation_ref = models.Reservation()
     reservation_ref.uuid = uuid
-    reservation_ref.usage_id = usage['id']
+    reservation_ref.usage_id = usage_id
     reservation_ref.project_id = project_id
     reservation_ref.resource = resource
     reservation_ref.delta = delta
     reservation_ref.expire = expire
+    reservation_ref.allocated_id = allocated_id
     reservation_ref.save(session=session)
     return reservation_ref
 
@@ -838,7 +840,8 @@ def quota_usage_update_resource(context, old_res, new_res):
 @require_context
 @_retry_on_deadlock
 def quota_reserve(context, resources, quotas, deltas, expire,
-                  until_refresh, max_age, project_id=None):
+                  until_refresh, max_age, project_id=None,
+                  is_allocated_reserve=False):
     elevated = context.elevated()
     session = get_session()
     with session.begin():
@@ -919,8 +922,14 @@ def quota_reserve(context, resources, quotas, deltas, expire,
         # a best-effort mechanism.
 
         # Check for deltas that would go negative
-        unders = [r for r, delta in deltas.items()
-                  if delta < 0 and delta + usages[r].in_use < 0]
+        if is_allocated_reserve:
+            unders = [r for r, delta in deltas.items()
+                      if delta < 0 and delta + allocated.get(r, 0) < 0]
+        else:
+            unders = [r for r, delta in deltas.items()
+                      if delta < 0 and delta + usages[r].in_use < 0]
+
+        # TODO(mc_nair): Should ignore/zero alloc if using non-nested driver
 
         # Now, let's check the quotas
         # NOTE(Vek): We're only concerned about positive increments.
@@ -942,12 +951,26 @@ def quota_reserve(context, resources, quotas, deltas, expire,
         if not overs:
             reservations = []
             for resource, delta in deltas.items():
-                reservation = _reservation_create(elevated,
-                                                  str(uuid.uuid4()),
-                                                  usages[resource],
-                                                  project_id,
-                                                  resource, delta, expire,
-                                                  session=session)
+                usage = usages[resource]
+                allocated_id = None
+                if is_allocated_reserve:
+                    try:
+                        quota = _quota_get(context, project_id, resource,
+                                           session=session)
+                    except exception.ProjectQuotaNotFound:
+                        # If we were using the default quota, create DB entry
+                        quota = quota_create(context, project_id, resource,
+                                             quotas[resource], 0)
+                    # Since there's no reserved/total for allocated, update
+                    # allocated immediately and subtract on rollback if needed
+                    quota_allocated_update(context, project_id, resource,
+                                           quota.allocated + delta)
+                    allocated_id = quota.id
+                    usage = None
+                reservation = _reservation_create(
+                    elevated, str(uuid.uuid4()), usage, project_id, resource,
+                    delta, expire, session=session, allocated_id=allocated_id)
 
                 reservations.append(reservation.uuid)
 
                 # Also update the reserved quantity
@@ -962,14 +985,15 @@ def quota_reserve(context, resources, quotas, deltas, expire,
                 #
                 # To prevent this, we only update the
                 # reserved value if the delta is positive.
-                if delta > 0:
+                if delta > 0 and not is_allocated_reserve:
                     usages[resource].reserved += delta
 
    if unders:
        LOG.warning(_LW("Change will make usage less than 0 for the following "
                        "resources: %s"), unders)
    if overs:
-        usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'])
+        usages = {k: dict(in_use=v.in_use, reserved=v.reserved,
+                          allocated=allocated.get(k, 0))
                  for k, v in usages.items()}
        raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
                                  usages=usages)
@@ -1002,10 +1026,12 @@ def reservation_commit(context, reservations, project_id=None):
         usages = _dict_with_usage_id(usages)
 
         for reservation in _quota_reservations(session, context, reservations):
-            usage = usages[reservation.usage_id]
-            if reservation.delta >= 0:
-                usage.reserved -= reservation.delta
-            usage.in_use += reservation.delta
+            # Allocated reservations will have already been bumped
+            if not reservation.allocated_id:
+                usage = usages[reservation.usage_id]
+                if reservation.delta >= 0:
+                    usage.reserved -= reservation.delta
+                usage.in_use += reservation.delta
 
             reservation.delete(session=session)
 
@@ -1018,9 +1044,12 @@ def reservation_rollback(context, reservations, project_id=None):
         usages = _get_quota_usages(context, session, project_id)
         usages = _dict_with_usage_id(usages)
         for reservation in _quota_reservations(session, context, reservations):
-            usage = usages[reservation.usage_id]
-            if reservation.delta >= 0:
-                usage.reserved -= reservation.delta
+            if reservation.allocated_id:
+                reservation.quota.allocated -= reservation.delta
+            else:
+                usage = usages[reservation.usage_id]
+                if reservation.delta >= 0:
+                    usage.reserved -= reservation.delta
 
             reservation.delete(session=session)
 
@@ -1089,8 +1118,12 @@ def reservation_expire(context):
         if results:
             for reservation in results:
                 if reservation.delta >= 0:
-                    reservation.usage.reserved -= reservation.delta
-                    reservation.usage.save(session=session)
+                    if reservation.allocated_id:
+                        reservation.quota.allocated -= reservation.delta
+                        reservation.quota.save(session=session)
+                    else:
+                        reservation.usage.reserved -= reservation.delta
+                        reservation.usage.save(session=session)
 
                 reservation.delete(session=session)
 
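A condensed, illustrative restatement (not the real DB-layer code) of how the
three finalizers above treat a reservation with allocated_id set versus a
normal usage reservation; the dict layout is hypothetical.

    def finalize(reservation, action):
        """Apply 'commit', 'rollback' or 'expire' to a simplified reservation."""
        if reservation['allocated_id'] is not None:
            # allocated was already bumped at reserve time, so commit leaves it
            # alone; rollback and expire subtract the delta back out
            if action in ('rollback', 'expire'):
                reservation['quota']['allocated'] -= reservation['delta']
        else:
            usage = reservation['usage']
            if reservation['delta'] >= 0:
                usage['reserved'] -= reservation['delta']
            if action == 'commit':
                usage['in_use'] += reservation['delta']


    alloc_res = {'allocated_id': 7, 'delta': 1, 'quota': {'allocated': 1}}
    finalize(alloc_res, 'rollback')
    assert alloc_res['quota']['allocated'] == 0

    usage_res = {'allocated_id': None, 'delta': 1,
                 'usage': {'reserved': 1, 'in_use': 0}}
    finalize(usage_res, 'commit')
    assert usage_res['usage'] == {'reserved': 0, 'in_use': 1}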
@@ -0,0 +1,28 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column
+from sqlalchemy import MetaData, Integer, Table, ForeignKey
+
+
+def upgrade(migrate_engine):
+    """Add allocated_id to the reservations table."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    reservations = Table('reservations', meta, autoload=True)
+    Table('quotas', meta, autoload=True)
+    allocated_id = Column('allocated_id', Integer, ForeignKey('quotas.id'),
+                          nullable=True)
+    reservations.create_column(allocated_id)
+    usage_id = reservations.c.usage_id
+    usage_id.alter(nullable=True)
@@ -430,7 +430,8 @@ class Reservation(BASE, CinderBase):
     id = Column(Integer, primary_key=True)
     uuid = Column(String(36), nullable=False)
 
-    usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
+    usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=True)
+    allocated_id = Column(Integer, ForeignKey('quotas.id'), nullable=True)
 
     project_id = Column(String(255), index=True)
     resource = Column(String(255))
@@ -443,6 +444,10 @@ class Reservation(BASE, CinderBase):
                          foreign_keys=usage_id,
                          primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
                                      'QuotaUsage.deleted == 0)')
+    quota = relationship(
+        "Quota",
+        foreign_keys=allocated_id,
+        primaryjoin='and_(Reservation.allocated_id == Quota.id)')
 
 
 class Snapshot(BASE, CinderBase):

cinder/quota.py: 200 lines changed
@@ -374,7 +374,10 @@ class DbQuotaDriver(object):
         # quotas, but that's a pretty rare thing.
         quotas = self._get_quotas(context, resources, deltas.keys(),
                                   has_sync=True, project_id=project_id)
+        return self._reserve(context, resources, quotas, deltas, expire,
+                             project_id)
 
+    def _reserve(self, context, resources, quotas, deltas, expire, project_id):
         # NOTE(Vek): Most of the work here has to be done in the DB
         #            API, because we have to do it in a transaction,
         #            which means access to the session. Since the
@@ -444,20 +447,23 @@ class NestedDbQuotaDriver(DbQuotaDriver):
         """Ensures project_tree has quotas that make sense as nested quotas.
 
         Validates the following:
-          * No child projects have a limit of -1
           * No parent project has child_projects who have more combined quota
             than the parent's quota limit
           * No child quota has a larger in-use value than it's current limit
            (could happen before because child default values weren't enforced)
          * All parent projects' "allocated" quotas match the sum of the limits
            of its children projects
+
+        TODO(mc_nair): need a better way to "flip the switch" to use nested
+        quotas to make this less race-ee
         """
+        self._allocated = {}
         project_queue = deque(project_tree.items())
         borked_allocated_quotas = {}
 
         while project_queue:
             # Tuple of (current root node, subtree)
-            cur_project_id, project_subtree = project_queue.popleft()
+            cur_proj_id, project_subtree = project_queue.popleft()
 
             # If we're on a leaf node, no need to do validation on it, and in
             # order to avoid complication trying to get its children, skip it.
@@ -465,81 +471,53 @@ class NestedDbQuotaDriver(DbQuotaDriver):
                 continue
 
             cur_project_quotas = self.get_project_quotas(
-                ctxt, resources, cur_project_id)
-
-            child_project_ids = project_subtree.keys()
-            child_project_quotas = {child_id: self.get_project_quotas(
-                ctxt, resources, child_id) for child_id in child_project_ids}
+                ctxt, resources, cur_proj_id)
 
             # Validate each resource when compared to it's child quotas
             for resource in cur_project_quotas.keys():
-                child_limit_sum = 0
-                for child_id, child_quota in child_project_quotas.items():
-                    child_limit = child_quota[resource]['limit']
-                    # Don't want to continue validation if -1 limit for child
-                    # TODO(mc_nair) - remove when allowing -1 for subprojects
-                    if child_limit < 0:
-                        msg = _("Quota limit is -1 for child project "
-                                "'%(proj)s' for resource '%(res)s'") % {
-                            'proj': child_id, 'res': resource
-                        }
-                        raise exception.InvalidNestedQuotaSetup(reason=msg)
-                    # Handle the case that child default quotas weren't being
-                    # properly enforced before
-                    elif child_quota[resource].get('in_use', 0) > child_limit:
-                        msg = _("Quota limit invalid for project '%(proj)s' "
-                                "for resource '%(res)s': limit of %(limit)d "
-                                "is less than in-use value of %(used)d") % {
-                            'proj': child_id, 'res': resource,
-                            'limit': child_limit,
-                            'used': child_quota[resource]['in_use']
-                        }
-                        raise exception.InvalidNestedQuotaSetup(reason=msg)
-
-                    child_limit_sum += child_quota[resource]['limit']
-
                 parent_quota = cur_project_quotas[resource]
                 parent_limit = parent_quota['limit']
-                parent_usage = parent_quota['in_use']
-                parent_allocated = parent_quota.get('allocated', 0)
+                parent_usage = (parent_quota['in_use'] +
+                                parent_quota['reserved'])
+
+                cur_parent_allocated = parent_quota.get('allocated', 0)
+                calc_parent_allocated = self._get_cur_project_allocated(
+                    ctxt, resources[resource], {cur_proj_id: project_subtree})
 
                 if parent_limit > 0:
                     parent_free_quota = parent_limit - parent_usage
-                    if parent_free_quota < child_limit_sum:
-                        msg = _("Sum of child limits '%(sum)s' is greater "
+                    if parent_free_quota < calc_parent_allocated:
+                        msg = _("Sum of child usage '%(sum)s' is greater "
                                 "than free quota of '%(free)s' for project "
                                 "'%(proj)s' for resource '%(res)s'. Please "
-                                "lower the limit for one or more of the "
-                                "following projects: '%(child_ids)s'") % {
-                            'sum': child_limit_sum, 'free': parent_free_quota,
-                            'proj': cur_project_id, 'res': resource,
-                            'child_ids': ', '.join(child_project_ids)
+                                "lower the limit or usage for one or more of "
+                                "the following projects: '%(child_ids)s'") % {
+                            'sum': calc_parent_allocated,
+                            'free': parent_free_quota,
+                            'proj': cur_proj_id, 'res': resource,
+                            'child_ids': ', '.join(project_subtree.keys())
                         }
                         raise exception.InvalidNestedQuotaSetup(reason=msg)
 
-                # Deal with the fact that using -1 limits in the past may
-                # have messed some allocated values in DB
-                if parent_allocated != child_limit_sum:
-                    # Decide whether to fix the allocated val or just
-                    # keep track of what's messed up
+                # If "allocated" value wasn't right either err or fix DB
+                if calc_parent_allocated != cur_parent_allocated:
                     if fix_allocated_quotas:
                         try:
-                            db.quota_allocated_update(ctxt, cur_project_id,
+                            db.quota_allocated_update(ctxt, cur_proj_id,
                                                       resource,
-                                                      child_limit_sum)
+                                                      calc_parent_allocated)
                         except exception.ProjectQuotaNotFound:
-                            # Handles the case that the project is using
-                            # default quota value so nothing present to update
+                            # If it was default quota create DB entry for it
                             db.quota_create(
-                                ctxt, cur_project_id, resource,
-                                parent_limit, allocated=child_limit_sum)
+                                ctxt, cur_proj_id, resource,
+                                parent_limit, allocated=calc_parent_allocated)
                     else:
-                        if cur_project_id not in borked_allocated_quotas:
-                            borked_allocated_quotas[cur_project_id] = {}
+                        if cur_proj_id not in borked_allocated_quotas:
+                            borked_allocated_quotas[cur_proj_id] = {}
 
-                        borked_allocated_quotas[cur_project_id][resource] = {
-                            'db_allocated_quota': parent_allocated,
-                            'expected_allocated_quota': child_limit_sum}
+                        borked_allocated_quotas[cur_proj_id][resource] = {
+                            'db_allocated_quota': cur_parent_allocated,
+                            'expected_allocated_quota': calc_parent_allocated}
 
             project_queue.extend(project_subtree.items())
 
@@ -548,6 +526,76 @@ class NestedDbQuotaDriver(DbQuotaDriver):
                     "project quotas: %s") % borked_allocated_quotas
             raise exception.InvalidNestedQuotaSetup(message=msg)
 
+    def _get_cur_project_allocated(self, ctxt, resource, project_tree):
+        """Recursively calculates the allocated value of a project
+
+        :param ctxt: context used to retrieve DB values
+        :param resource: the resource to calculate allocated value for
+        :param project_tree: the project tree used to calculate allocated
+                e.g. {'A': {'B': {'D': None}, 'C': None}
+
+        A project's "allocated" value depends on:
+
+        1) the quota limits which have been "given" to it's children, in
+           the case those limits are not unlimited (-1)
+
+        2) the current quota being used by a child plus whatever the child
+           has given to it's children, in the case of unlimited (-1) limits
+
+        Scenario #2 requires recursively calculating allocated, and in order
+        to efficiently calculate things we will save off any previously
+        calculated allocated values.
+
+        NOTE: this currently leaves a race condition when a project's allocated
+        value has been calculated (with a -1 limit), but then a child project
+        gets a volume created, thus changing the in-use value and messing up
+        the child's allocated value. We should look into updating the allocated
+        values as we're going along and switching to NestedQuotaDriver with
+        flip of a switch.
+        """
+        # Grab the current node
+        cur_project_id = list(project_tree)[0]
+        project_subtree = project_tree[cur_project_id]
+        res_name = resource.name
+
+        if cur_project_id not in self._allocated:
+            self._allocated[cur_project_id] = {}
+
+        if res_name not in self._allocated[cur_project_id]:
+            # Calculate the allocated value for this resource since haven't yet
+            cur_project_allocated = 0
+            child_proj_ids = project_subtree.keys() if project_subtree else {}
+            res_dict = {res_name: resource}
+            child_project_quotas = {child_id: self.get_project_quotas(
+                ctxt, res_dict, child_id) for child_id in child_proj_ids}
+
+            for child_id, child_quota in child_project_quotas.items():
+                child_limit = child_quota[res_name]['limit']
+                # Non-unlimited quota is easy, anything explicitly given to a
+                # child project gets added into allocated value
+                if child_limit != -1:
+                    if child_quota[res_name].get('in_use', 0) > child_limit:
+                        msg = _("Quota limit invalid for project '%(proj)s' "
+                                "for resource '%(res)s': limit of %(limit)d "
+                                "is less than in-use value of %(used)d") % {
+                            'proj': child_id, 'res': res_name,
+                            'limit': child_limit,
+                            'used': child_quota[res_name]['in_use']
+                        }
+                        raise exception.InvalidNestedQuotaSetup(reason=msg)
+
+                    cur_project_allocated += child_limit
+                # For -1, take any quota being eaten up by child, as well as
+                # what the child itself has given up to its children
+                else:
+                    child_in_use = child_quota[res_name].get('in_use', 0)
+                    # Recursively calculate child's allocated
+                    child_alloc = self._get_cur_project_allocated(
+                        ctxt, resource, {child_id: project_subtree[child_id]})
+                    cur_project_allocated += child_in_use + child_alloc
+
+            self._allocated[cur_project_id][res_name] = cur_project_allocated
+
+        return self._allocated[cur_project_id][res_name]
+
     def get_default(self, context, resource, project_id):
         """Get a specific default quota for a resource."""
         resource = super(NestedDbQuotaDriver, self).get_default(
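A toy, self-contained version of the recursion above: a parent's expected
"allocated" is the sum of each child's hard limit, or (in_use plus the
child's own allocated) when the child's limit is -1. Project names and
numbers here are made up for illustration.

    limits = {'A': 20, 'B': -1, 'C': 5, 'D': 3}
    in_use = {'A': 0, 'B': 2, 'C': 1, 'D': 1}
    tree = {'A': {'B': {'D': None}, 'C': None}}


    def allocated(project, subtree):
        total = 0
        for child, child_subtree in (subtree or {}).items():
            if limits[child] != -1:
                total += limits[child]                       # hard limit case
            else:
                total += in_use[child] + allocated(child, child_subtree)
        return total


    # B is unlimited, so it contributes its usage (2) plus what it gave D (3);
    # C contributes its hard limit (5): expected allocated for A is 10.
    assert allocated('A', tree['A']) == 10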
@@ -565,6 +613,33 @@ class NestedDbQuotaDriver(DbQuotaDriver):
                 defaults[key] = 0
         return defaults
 
+    def _reserve(self, context, resources, quotas, deltas, expire, project_id):
+        reserved = []
+        # As to not change the exception behavior, flag every res that would
+        # be over instead of failing on first OverQuota
+        resources_failed_to_update = []
+        failed_usages = {}
+        for res in deltas.keys():
+            try:
+                reserved += db.quota_reserve(
+                    context, resources, quotas, {res: deltas[res]},
+                    expire, CONF.until_refresh, CONF.max_age, project_id)
+                if quotas[res] == -1:
+                    reserved += quota_utils.update_alloc_to_next_hard_limit(
+                        context, resources, deltas, res, expire, project_id)
+            except exception.OverQuota as e:
+                resources_failed_to_update.append(res)
+                failed_usages.update(e.kwargs['usages'])
+
+        if resources_failed_to_update:
+            db.reservation_rollback(context, reserved, project_id)
+            # We change OverQuota to OverVolumeLimit in other places and expect
+            # to find all of the OverQuota kwargs
+            raise exception.OverQuota(overs=sorted(resources_failed_to_update),
+                                      quotas=quotas, usages=failed_usages)
+
+        return reserved
+
 
 class BaseResource(object):
     """Describe a single resource for quota checking."""
@@ -782,9 +857,18 @@ class QuotaEngine(object):
 
     def get_by_project(self, context, project_id, resource_name):
         """Get a specific quota by project."""
 
         return self._driver.get_by_project(context, project_id, resource_name)
 
+    def get_by_project_or_default(self, context, project_id, resource_name):
+        """Get specific quota by project or default quota if doesn't exists."""
+        try:
+            val = self.get_by_project(
+                context, project_id, resource_name).hard_limit
+        except exception.ProjectQuotaNotFound:
+            val = self.get_defaults(context, project_id)[resource_name]
+
+        return val
+
     def get_by_class(self, context, quota_class, resource_name):
         """Get a specific quota by quota class."""
 
@@ -22,6 +22,7 @@ from keystoneclient import client
 from keystoneclient import exceptions
 from keystoneclient import session
 
+from cinder import db
 from cinder import exception
 from cinder.i18n import _, _LW
 
@@ -102,7 +103,8 @@ def get_volume_type_reservation(ctxt, volume, type_id,
     return reservations
 
 
-def get_project_hierarchy(context, project_id, subtree_as_ids=False):
+def get_project_hierarchy(context, project_id, subtree_as_ids=False,
+                          parents_as_ids=False):
     """A Helper method to get the project hierarchy.
 
     Along with hierarchical multitenancy in keystone API v3, projects can be
@@ -114,10 +116,13 @@ def get_project_hierarchy(context, project_id, subtree_as_ids=False,
         generic_project = GenericProjectInfo(project_id, keystone.version)
         if keystone.version == 'v3':
             project = keystone.projects.get(project_id,
-                                            subtree_as_ids=subtree_as_ids)
+                                            subtree_as_ids=subtree_as_ids,
+                                            parents_as_ids=parents_as_ids)
             generic_project.parent_id = project.parent_id
             generic_project.subtree = (
                 project.subtree if subtree_as_ids else None)
+            generic_project.parents = (
+                project.parents if parents_as_ids else None)
     except exceptions.NotFound:
         msg = (_("Tenant ID: %s does not exist.") % project_id)
         raise webob.exc.HTTPNotFound(explanation=msg)
@@ -145,6 +150,35 @@ def get_all_root_project_ids(context):
     return project_roots
 
 
+def update_alloc_to_next_hard_limit(context, resources, deltas, res,
+                                    expire, project_id):
+    from cinder import quota
+    QUOTAS = quota.QUOTAS
+    reservations = []
+    projects = get_project_hierarchy(context, project_id,
+                                     parents_as_ids=True).parents
+    hard_limit_found = False
+    # Update allocated values up the chain til we hit a hard limit or run out
+    # of parents
+    while projects and not hard_limit_found:
+        cur_proj_id = list(projects)[0]
+        projects = projects[cur_proj_id]
+        cur_quota_lim = QUOTAS.get_by_project_or_default(
+            context, cur_proj_id, res)
+        hard_limit_found = (cur_quota_lim != -1)
+        cur_quota = {res: cur_quota_lim}
+        cur_delta = {res: deltas[res]}
+        try:
+            reservations += db.quota_reserve(
+                context, resources, cur_quota, cur_delta, expire,
+                CONF.until_refresh, CONF.max_age, cur_proj_id,
+                is_allocated_reserve=True)
+        except exception.OverQuota:
+            db.reservation_rollback(context, reservations)
+            raise
+    return reservations
+
+
 def validate_setup_for_nested_quota_use(ctxt, resources,
                                         nested_quota_driver,
                                         fix_allocated_quotas=False):
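An illustration of how the while loop above walks the nested "parents" dict
that keystone returns when parents_as_ids=True (project IDs are made up): for
a project D in the hierarchy A -> B -> D, the parents structure is a
one-branch nested dict, and indexing into it descends one ancestor per pass.

    parents = {'B': {'A': None}}   # D's immediate parent B, whose parent is A

    walked = []
    projects = parents
    while projects:
        cur_proj_id = list(projects)[0]
        projects = projects[cur_proj_id]   # descend one level: next ancestor up
        walked.append(cur_proj_id)

    assert walked == ['B', 'A']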
@@ -29,6 +29,7 @@ import webob.exc
 from cinder.api.contrib import quotas
 from cinder import context
 from cinder import db
+from cinder import exception
 from cinder import quota
 from cinder import test
 from cinder.tests.unit import test_db_api
@@ -43,7 +44,7 @@ CONF = cfg.CONF
 
 def make_body(root=True, gigabytes=1000, snapshots=10,
               volumes=10, backups=10, backup_gigabytes=1000,
-              tenant_id='foo', per_volume_gigabytes=-1, is_child=False):
+              tenant_id='foo', per_volume_gigabytes=-1):
     resources = {'gigabytes': gigabytes,
                  'snapshots': snapshots,
                  'volumes': volumes,
@@ -53,17 +54,10 @@ def make_body(root=True, gigabytes=1000, snapshots=10,
     # need to consider preexisting volume types as well
     volume_types = db.volume_type_get_all(context.get_admin_context())
 
-    if not is_child:
-        for volume_type in volume_types:
-            resources['gigabytes_' + volume_type] = -1
-            resources['snapshots_' + volume_type] = -1
-            resources['volumes_' + volume_type] = -1
-    elif per_volume_gigabytes < 0:
-        # In the case that we're dealing with a child project, we aren't
-        # allowing -1 limits for the time being, so hack this to some large
-        # enough value for the tests that it's essentially unlimited
-        # TODO(mc_nair): remove when -1 limits for child projects are allowed
-        resources['per_volume_gigabytes'] = 10000
+    for volume_type in volume_types:
+        resources['gigabytes_' + volume_type] = -1
+        resources['snapshots_' + volume_type] = -1
+        resources['volumes_' + volume_type] = -1
 
     if tenant_id:
         resources['id'] = tenant_id
@@ -91,6 +85,7 @@ class QuotaSetsControllerTestBase(test.TestCase):
             self.id = id
             self.parent_id = parent_id
             self.subtree = None
+            self.parents = None
 
     def setUp(self):
         super(QuotaSetsControllerTestBase, self).setUp()
@@ -103,6 +98,7 @@ class QuotaSetsControllerTestBase(test.TestCase):
         self.req.params = {}
 
         self._create_project_hierarchy()
+        self.req.environ['cinder.context'].project_id = self.A.id
 
         get_patcher = mock.patch('cinder.quota_utils.get_project_hierarchy',
                                  self._get_project)
@@ -143,21 +139,31 @@ class QuotaSetsControllerTestBase(test.TestCase):
         self.B.subtree = {self.D.id: self.D.subtree}
         self.A.subtree = {self.B.id: self.B.subtree, self.C.id: self.C.subtree}
 
+        self.A.parents = None
+        self.B.parents = {self.A.id: None}
+        self.C.parents = {self.A.id: None}
+        self.D.parents = {self.B.id: self.B.parents}
+
         # project_by_id attribute is used to recover a project based on its id.
         self.project_by_id = {self.A.id: self.A, self.B.id: self.B,
                               self.C.id: self.C, self.D.id: self.D}
 
-    def _get_project(self, context, id, subtree_as_ids=False):
+    def _get_project(self, context, id, subtree_as_ids=False,
+                     parents_as_ids=False):
         return self.project_by_id.get(id, self.FakeProject())
 
+    def _create_fake_quota_usages(self, usage_map):
+        self._fake_quota_usages = {}
+        for key, val in usage_map.items():
+            self._fake_quota_usages[key] = {'in_use': val}
+
+    def _fake_quota_usage_get_all_by_project(self, context, project_id):
+        return {'volumes': self._fake_quota_usages[project_id]}
+
 
 class QuotaSetsControllerTest(QuotaSetsControllerTestBase):
     def setUp(self):
         super(QuotaSetsControllerTest, self).setUp()
-        fixture = self.useFixture(config_fixture.Config(quota.CONF))
-        fixture.config(quota_driver="cinder.quota.DbQuotaDriver")
-        quotas.QUOTAS = quota.VolumeTypeQuotaEngine()
-        self.controller = quotas.QuotaSetsController()
 
     def test_defaults(self):
         result = self.controller.defaults(self.req, 'foo')
@@ -297,7 +303,8 @@ class QuotaSetsControllerTest(QuotaSetsControllerTestBase):
                 'skip_validation': 'false'}
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                           self.req, 'foo', body)
-        body = {'quota_set': {'gigabytes': 1},
+        # Ensure that validation works even if some resources are valid
+        body = {'quota_set': {'gigabytes': 1, 'volumes': 10},
                 'skip_validation': 'false'}
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                           self.req, 'foo', body)
@@ -451,20 +458,11 @@ class QuotaSetControllerValidateNestedQuotaSetup(QuotaSetsControllerTestBase):
             self.req.params['fix_allocated_quotas'] = True
             self.controller.validate_setup_for_nested_quota_use(self.req)
 
-    def _fake_quota_usage_get_all_by_project(self, context, project_id):
-        proj_vals = {
-            self.A.id: {'in_use': 1},
-            self.B.id: {'in_use': 1},
-            self.D.id: {'in_use': 0},
-            self.C.id: {'in_use': 3},
-            self.E.id: {'in_use': 0},
-            self.F.id: {'in_use': 0},
-            self.G.id: {'in_use': 0},
-        }
-        return {'volumes': proj_vals[project_id]}
-
     @mock.patch('cinder.db.quota_usage_get_all_by_project')
     def test_validate_nested_quotas_in_use_vols(self, mock_usage):
+        self._create_fake_quota_usages(
+            {self.A.id: 1, self.B.id: 1, self.D.id: 0, self.C.id: 3,
+             self.E.id: 0, self.F.id: 0, self.G.id: 0})
         mock_usage.side_effect = self._fake_quota_usage_get_all_by_project
 
         # Update the project A quota.
@@ -493,6 +491,9 @@ class QuotaSetControllerValidateNestedQuotaSetup(QuotaSetsControllerTestBase):
 
     @mock.patch('cinder.db.quota_usage_get_all_by_project')
     def test_validate_nested_quotas_quota_borked(self, mock_usage):
+        self._create_fake_quota_usages(
+            {self.A.id: 1, self.B.id: 1, self.D.id: 0, self.C.id: 3,
+             self.E.id: 0, self.F.id: 0, self.G.id: 0})
         mock_usage.side_effect = self._fake_quota_usage_get_all_by_project
 
         # Update the project A quota.
@ -508,38 +509,74 @@ class QuotaSetControllerValidateNestedQuotaSetup(QuotaSetsControllerTestBase):
|
            self.controller.validate_setup_for_nested_quota_use,
            self.req)
 
-    def test_validate_nested_quota_negative_limits(self):
-        # When we're validating, update the allocated values since we've
-        # been updating child limits
-        self.req.params['fix_allocated_quotas'] = True
-        self.controller.validate_setup_for_nested_quota_use(self.req)
-        # Update the project A quota.
+    @mock.patch('cinder.db.quota_usage_get_all_by_project')
+    def test_validate_nested_quota_negative_limits(self, mock_usage):
+        # TODO(mc_nair): this test case can be moved to Tempest once nested
+        # quota coverage added
+        self._create_fake_quota_usages(
+            {self.A.id: 1, self.B.id: 3, self.C.id: 0, self.D.id: 2,
+             self.E.id: 2, self.F.id: 0, self.G.id: 0})
+        mock_usage.side_effect = self._fake_quota_usage_get_all_by_project
+
+        # Setting E-F as children of D for this test case to flex the muscles
+        # of more complex nesting
+        self.D.subtree = {self.E.id: self.E.subtree}
+        self.E.parent_id = self.D.id
+        # Get B's subtree up to date with this change
+        self.B.subtree[self.D.id] = self.D.subtree
+
+        # Quota heirarchy now is
+        #   / B - D - E - F
+        # A
+        #   \ C
+        #
+        # G
+
         self.req.environ['cinder.context'].project_id = self.A.id
-        quota_limit = {'volumes': -1}
+        quota_limit = {'volumes': 10}
         body = {'quota_set': quota_limit}
         self.controller.update(self.req, self.A.id, body)
 
-        quota_limit['volumes'] = 4
-        self.controller.update(self.req, self.B.id, body)
-
-        self.controller.validate_setup_for_nested_quota_use(self.req)
+        quota_limit['volumes'] = 1
+        self.controller.update(self.req, self.C.id, body)
 
         quota_limit['volumes'] = -1
+        self.controller.update(self.req, self.B.id, body)
+        self.controller.update(self.req, self.D.id, body)
         self.controller.update(self.req, self.F.id, body)
-        # Should not work because can't have a child with negative limits
-        self.assertRaises(
-            webob.exc.HTTPBadRequest,
-            self.controller.validate_setup_for_nested_quota_use,
-            self.req)
+        quota_limit['volumes'] = 5
+        self.controller.update(self.req, self.E.id, body)
+
+        # Should fail because too much is allocated to children for A
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.validate_setup_for_nested_quota_use,
+                          self.req)
+
+        # When root has -1 limit, children can allocate as much as they want
+        quota_limit['volumes'] = -1
+        self.controller.update(self.req, self.A.id, body)
+        self.req.params['fix_allocated_quotas'] = True
+        self.controller.validate_setup_for_nested_quota_use(self.req)
+
+        # Not unlimited, but make children's allocated within bounds
+        quota_limit['volumes'] = 10
+        self.controller.update(self.req, self.A.id, body)
+        quota_limit['volumes'] = 3
+        self.controller.update(self.req, self.E.id, body)
+        self.req.params['fix_allocated_quotas'] = True
+        self.controller.validate_setup_for_nested_quota_use(self.req)
+        self.req.params['fix_allocated_quotas'] = False
+        self.controller.validate_setup_for_nested_quota_use(self.req)
 
 
 class QuotaSetsControllerNestedQuotasTest(QuotaSetsControllerTestBase):
     def setUp(self):
         super(QuotaSetsControllerNestedQuotasTest, self).setUp()
-        fixture = self.useFixture(config_fixture.Config(quota.CONF))
-        fixture.config(quota_driver="cinder.quota.NestedDbQuotaDriver")
-        quotas.QUOTAS = quota.VolumeTypeQuotaEngine()
-        self.controller = quotas.QuotaSetsController()
+        driver = quota.NestedDbQuotaDriver()
+        patcher = mock.patch('cinder.quota.VolumeTypeQuotaEngine._driver',
+                             driver)
+        patcher.start()
+        self.addCleanup(patcher.stop)
 
     def test_subproject_defaults(self):
         context = self.req.environ['cinder.context']
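A note on what the reworked test above is exercising: a child limit of -1 no longer stops validation at that project; the check keeps walking up the hierarchy until it reaches a project with a hard limit (or runs out of parents), and the usage under the -1 projects is folded into that ancestor's "allocated" total. The sketch below only illustrates that walk-up rule in plain Python -- the Project class, its field names, and the fits_under_nearest_hard_limit helper are invented for this example and are not Cinder code.

class Project(object):
    """Toy stand-in for a project node (not a Cinder class)."""

    def __init__(self, limit, in_use, allocated=0, parent=None):
        self.limit = limit          # -1 means "no hard limit at this level"
        self.in_use = in_use
        self.allocated = allocated  # quota already handed out to children
        self.parent = parent


def fits_under_nearest_hard_limit(project, requested):
    """Walk up the tree until a project with a hard (non -1) limit is found.

    Assumes each ancestor's `allocated` already reflects what its -1
    children are consuming, which is what the reservation changes in this
    commit keep up to date.
    """
    node = project
    while node is not None:
        if node.limit != -1:
            free = node.limit - node.in_use - node.allocated
            return requested <= free
        node = node.parent
    return True  # no hard limit anywhere up the chain


# A has a hard limit of 10, has used 1 itself and handed 5 to children;
# B and D defer upward with -1 limits.
A = Project(limit=10, in_use=1, allocated=5)
B = Project(limit=-1, in_use=3, parent=A)
D = Project(limit=-1, in_use=0, parent=B)
print(fits_under_nearest_hard_limit(D, 4))   # True: 4 <= 10 - 1 - 5
print(fits_under_nearest_hard_limit(D, 5))   # False: A is the cap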
@@ -584,7 +621,6 @@ class QuotaSetsControllerNestedQuotasTest(QuotaSetsControllerTestBase):
                           self.req, self.A.id)
 
     def test_update_subproject_not_in_hierarchy(self):
-
         # Create another project hierarchy
         E = self.FakeProject(id=uuid.uuid4().hex, parent_id=None)
         F = self.FakeProject(id=uuid.uuid4().hex, parent_id=E.id)
@@ -616,37 +652,29 @@ class QuotaSetsControllerNestedQuotasTest(QuotaSetsControllerTestBase):
         # Update the quota of B to be equal to its parent quota
         self.req.environ['cinder.context'].project_id = self.A.id
         body = make_body(gigabytes=2000, snapshots=15,
-                         volumes=5, backups=5, tenant_id=None, is_child=True)
+                         volumes=5, backups=5, tenant_id=None)
         result = self.controller.update(self.req, self.B.id, body)
         self.assertDictMatch(body, result)
         # Try to update the quota of C, it will not be allowed, since the
         # project A doesn't have free quota available.
         self.req.environ['cinder.context'].project_id = self.A.id
         body = make_body(gigabytes=2000, snapshots=15,
-                         volumes=5, backups=5, tenant_id=None, is_child=True)
+                         volumes=5, backups=5, tenant_id=None)
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                           self.req, self.C.id, body)
         # Successfully update the quota of D.
         self.req.environ['cinder.context'].project_id = self.A.id
         body = make_body(gigabytes=1000, snapshots=7,
-                         volumes=3, backups=3, tenant_id=None, is_child=True)
+                         volumes=3, backups=3, tenant_id=None)
         result = self.controller.update(self.req, self.D.id, body)
         self.assertDictMatch(body, result)
         # An admin of B can also update the quota of D, since D is its
         # immediate child.
         self.req.environ['cinder.context'].project_id = self.B.id
         body = make_body(gigabytes=1500, snapshots=10,
-                         volumes=4, backups=4, tenant_id=None, is_child=True)
+                         volumes=4, backups=4, tenant_id=None)
         self.controller.update(self.req, self.D.id, body)
 
-    def test_update_subproject_negative_limit(self):
-        # Should not be able to set a negative limit for a child project (will
-        # require further fixes to allow for this)
-        self.req.environ['cinder.context'].project_id = self.A.id
-        body = make_body(volumes=-1, is_child=True)
-        self.assertRaises(webob.exc.HTTPBadRequest,
-                          self.controller.update, self.req, self.B.id, body)
-
     def test_update_subproject_repetitive(self):
         # Update the project A volumes quota.
         self.req.environ['cinder.context'].project_id = self.A.id
@@ -660,8 +688,7 @@ class QuotaSetsControllerNestedQuotasTest(QuotaSetsControllerTestBase):
         for i in range(0, 3):
             self.req.environ['cinder.context'].project_id = self.A.id
             body = make_body(gigabytes=2000, snapshots=15,
-                             volumes=10, backups=5, tenant_id=None,
-                             is_child=True)
+                             volumes=10, backups=5, tenant_id=None)
             result = self.controller.update(self.req, self.B.id, body)
             self.assertDictMatch(body, result)
 
@@ -686,17 +713,50 @@ class QuotaSetsControllerNestedQuotasTest(QuotaSetsControllerTestBase):
         self.req.environ['cinder.context'].project_id = self.A.id
         # Update the project B quota.
         expected = make_body(gigabytes=1000, snapshots=10,
-                             volumes=5, backups=5, tenant_id=None,
-                             is_child=True)
+                             volumes=5, backups=5, tenant_id=None)
         result = self.controller.update(self.req, self.B.id, expected)
         self.assertDictMatch(expected, result)
 
+    def _assert_quota_show(self, proj_id, resource, in_use=0, reserved=0,
+                           allocated=0, limit=0):
+        self.req.params = {'usage': 'True'}
+        show_res = self.controller.show(self.req, proj_id)
+        expected = {'in_use': in_use, 'reserved': reserved,
+                    'allocated': allocated, 'limit': limit}
+        self.assertEqual(expected, show_res['quota_set'][resource])
+
+    def test_project_allocated_considered_on_reserve(self):
+        def _reserve(project_id):
+            quotas.QUOTAS._driver.reserve(
+                self.req.environ['cinder.context'], quotas.QUOTAS.resources,
+                {'volumes': 1}, project_id=project_id)
+
+        # A's quota will default to 10 for volumes
+        quota = {'volumes': 5}
+        body = {'quota_set': quota}
+        self.controller.update(self.req, self.B.id, body)
+        self._assert_quota_show(self.A.id, 'volumes', allocated=5, limit=10)
+        quota['volumes'] = 3
+        self.controller.update(self.req, self.C.id, body)
+        self._assert_quota_show(self.A.id, 'volumes', allocated=8, limit=10)
+        _reserve(self.A.id)
+        _reserve(self.A.id)
+        self.assertRaises(exception.OverQuota, _reserve, self.A.id)
+
+    def test_update_parent_project_lower_than_child(self):
+        # A's quota will be default of 10
+        quota = {'volumes': 10}
+        body = {'quota_set': quota}
+        self.controller.update(self.req, self.B.id, body)
+        quota['volumes'] = 9
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.update, self.req, self.A.id, body)
+
     def test_subproject_delete(self):
         self.req.environ['cinder.context'].project_id = self.A.id
 
-        body = make_body(gigabytes=2000, snapshots=15,
-                         volumes=5, backups=5,
-                         backup_gigabytes=1000, tenant_id=None, is_child=True)
+        body = make_body(gigabytes=2000, snapshots=15, volumes=5, backups=5,
+                         backup_gigabytes=1000, tenant_id=None)
         result_update = self.controller.update(self.req, self.A.id, body)
         self.assertDictMatch(body, result_update)
 
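The new test_project_allocated_considered_on_reserve above pins down the accounting that makes this work: a parent's free headroom has to subtract not only its own in-use and reserved amounts but also whatever it has already allocated to children. A tiny standalone restatement of that arithmetic (the free_quota helper is invented for illustration, not a Cinder function):

def free_quota(limit, in_use=0, reserved=0, allocated=0):
    """Headroom left on a project once children's allocations are counted."""
    return limit - in_use - reserved - allocated


# Mirrors the test: A's volume limit defaults to 10, B is given 5 and C is
# given 3, so A itself can only reserve two more volumes before OverQuota.
assert free_quota(limit=10, allocated=5 + 3) == 2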
@@ -745,7 +805,7 @@ class QuotaSetsControllerNestedQuotasTest(QuotaSetsControllerTestBase):
         self.controller.update(self.req, self.A.id, body)
 
         # Allocate some of that quota to a child project
-        body = make_body(volumes=3, is_child=True)
+        body = make_body(volumes=3)
         self.controller.update(self.req, self.B.id, body)
 
         # Deleting 'A' should be disallowed since 'B' is using some of that
@@ -753,6 +813,217 @@ class QuotaSetsControllerNestedQuotasTest(QuotaSetsControllerTestBase):
         self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                           self.req, self.A.id)
 
+    def test_subproject_delete_with_child_updates_parent_allocated(self):
+        def _assert_delete_updates_allocated():
+            res = 'volumes'
+            self._assert_quota_show(self.A.id, res, allocated=2, limit=5)
+            self._assert_quota_show(self.B.id, res, allocated=2, limit=-1)
+            self.controller.delete(self.req, self.D.id)
+            self._assert_quota_show(self.A.id, res, allocated=0, limit=5)
+            self._assert_quota_show(self.B.id, res, allocated=0, limit=-1)
+
+        quota = {'volumes': 5}
+        body = {'quota_set': quota}
+        self.controller.update(self.req, self.A.id, body)
+
+        # Allocate some of that quota to a child project using hard limit
+        quota['volumes'] = -1
+        self.controller.update(self.req, self.B.id, body)
+        quota['volumes'] = 2
+        self.controller.update(self.req, self.D.id, body)
+
+        _assert_delete_updates_allocated()
+
+        # Allocate some of that quota to a child project by using volumes
+        quota['volumes'] = -1
+        self.controller.update(self.req, self.D.id, body)
+        for x in range(2):
+            quotas.QUOTAS._driver.reserve(
+                self.req.environ['cinder.context'], quotas.QUOTAS.resources,
+                {'volumes': 1}, project_id=self.D.id)
+
+        _assert_delete_updates_allocated()
+
+    def test_negative_child_limit_not_affecting_parents_free_quota(self):
+        quota = {'volumes': -1}
+        body = {'quota_set': quota}
+        self.controller.update(self.req, self.C.id, body)
+        self.controller.update(self.req, self.B.id, body)
+
+        # Shouldn't be able to set greater than parent
+        quota['volumes'] = 11
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+                          self.req, self.B.id, body)
+
+    def test_child_neg_limit_set_grandkid_zero_limit(self):
+        cur_quota_a = self.controller.show(self.req, self.A.id)
+        self.assertEqual(10, cur_quota_a['quota_set']['volumes'])
+
+        quota = {'volumes': -1}
+        body = {'quota_set': quota}
+        self.controller.update(self.req, self.B.id, body)
+
+        cur_quota_d = self.controller.show(self.req, self.D.id)
+        # Default child value is 0
+        self.assertEqual(0, cur_quota_d['quota_set']['volumes'])
+        # Should be able to set D explicitly to 0 since that's already the val
+        quota['volumes'] = 0
+        self.controller.update(self.req, self.D.id, body)
+
+    def test_grandkid_negative_one_limit_enforced(self):
+        quota = {'volumes': 2, 'gigabytes': 2}
+        body = {'quota_set': quota}
+        self.controller.update(self.req, self.A.id, body)
+
+        quota['volumes'] = -1
+        quota['gigabytes'] = -1
+        self.controller.update(self.req, self.B.id, body)
+        self.controller.update(self.req, self.C.id, body)
+        self.controller.update(self.req, self.D.id, body)
+
+        def _reserve(project_id):
+            quotas.QUOTAS._driver.reserve(
+                self.req.environ['cinder.context'], quotas.QUOTAS.resources,
+                {'volumes': 1, 'gigabytes': 1}, project_id=project_id)
+
+        _reserve(self.C.id)
+        _reserve(self.D.id)
+        self.assertRaises(exception.OverQuota, _reserve, self.B.id)
+        self.assertRaises(exception.OverQuota, _reserve, self.C.id)
+        self.assertRaises(exception.OverQuota, _reserve, self.D.id)
+
+        # Make sure the rollbacks went successfully for allocated for all res
+        for res in quota.keys():
+            self._assert_quota_show(self.A.id, res, allocated=2, limit=2)
+            self._assert_quota_show(self.B.id, res, allocated=1, limit=-1)
+            self._assert_quota_show(self.C.id, res, reserved=1, limit=-1)
+            self._assert_quota_show(self.D.id, res, reserved=1, limit=-1)
+
+    def test_child_update_affects_allocated_and_rolls_back(self):
+        quota = {'gigabytes': -1, 'volumes': 3}
+        body = {'quota_set': quota}
+        self.controller.update(self.req, self.A.id, body)
+        quota['volumes'] = -1
+        self.controller.update(self.req, self.B.id, body)
+        quota['volumes'] = 1
+        self.controller.update(self.req, self.C.id, body)
+
+        # Shouldn't be able to update to greater than the grandparent
+        quota['volumes'] = 3
+        quota['gigabytes'] = 1
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.update, self.req, self.D.id, body)
+        # Validate we haven't updated either parents' allocated value for
+        # any of the keys (even if some keys were valid)
+        self._assert_quota_show(self.A.id, 'volumes', allocated=1, limit=3)
+        self._assert_quota_show(self.A.id, 'gigabytes', limit=-1)
+        self._assert_quota_show(self.B.id, 'volumes', limit=-1)
+        self._assert_quota_show(self.B.id, 'gigabytes', limit=-1)
+
+        quota['volumes'] = 2
+        self.controller.update(self.req, self.D.id, body)
+        # Validate we have now updated the parent and grandparents'
+        self.req.params = {'usage': 'True'}
+        self._assert_quota_show(self.A.id, 'volumes', allocated=3, limit=3)
+        self._assert_quota_show(self.A.id, 'gigabytes', allocated=1, limit=-1)
+        self._assert_quota_show(self.B.id, 'volumes', allocated=2, limit=-1)
+        self._assert_quota_show(self.B.id, 'gigabytes', allocated=1, limit=-1)
+
+    def test_negative_child_limit_reserve_and_rollback(self):
+        quota = {'volumes': 2, 'gigabytes': 2}
+        body = {'quota_set': quota}
+        self.controller.update(self.req, self.A.id, body)
+
+        quota['volumes'] = -1
+        quota['gigabytes'] = -1
+        self.controller.update(self.req, self.B.id, body)
+        self.controller.update(self.req, self.C.id, body)
+        self.controller.update(self.req, self.D.id, body)
+
+        res = quotas.QUOTAS._driver.reserve(
+            self.req.environ['cinder.context'], quotas.QUOTAS.resources,
+            {'volumes': 2, 'gigabytes': 2}, project_id=self.D.id)
+
+        self.req.params = {'usage': 'True'}
+        quota_b = self.controller.show(self.req, self.B.id)
+        self.assertEqual(2, quota_b['quota_set']['volumes']['allocated'])
+        # A will be the next hard limit to set
+        quota_a = self.controller.show(self.req, self.A.id)
+        self.assertEqual(2, quota_a['quota_set']['volumes']['allocated'])
+        quota_d = self.controller.show(self.req, self.D.id)
+        self.assertEqual(2, quota_d['quota_set']['volumes']['reserved'])
+
+        quotas.QUOTAS.rollback(self.req.environ['cinder.context'], res,
+                               self.D.id)
+        # After the rollback, A's limit should be properly set again
+        quota_a = self.controller.show(self.req, self.A.id)
+        self.assertEqual(0, quota_a['quota_set']['volumes']['allocated'])
+        quota_d = self.controller.show(self.req, self.D.id)
+        self.assertEqual(0, quota_d['quota_set']['volumes']['in_use'])
+
+    @mock.patch('cinder.db.sqlalchemy.api._get_quota_usages')
+    @mock.patch('cinder.db.quota_usage_get_all_by_project')
+    def test_nested_quota_set_negative_limit(self, mock_usage, mock_get_usage):
+        # TODO(mc_nair): this test should be moved to Tempest once nested quota
+        # coverage is added
+        fake_usages = {self.A.id: 1, self.B.id: 1, self.D.id: 2, self.C.id: 0}
+        self._create_fake_quota_usages(fake_usages)
+        mock_usage.side_effect = self._fake_quota_usage_get_all_by_project
+
+        class FakeUsage(object):
+            def __init__(self, in_use, reserved):
+                self.in_use = in_use
+                self.reserved = reserved
+                self.until_refresh = None
+                self.total = self.reserved + self.in_use
+
+        def _fake__get_quota_usages(context, session, project_id):
+            if not project_id:
+                return {}
+            return {'volumes': FakeUsage(fake_usages[project_id], 0)}
+        mock_get_usage.side_effect = _fake__get_quota_usages
+
+        # Update the project A quota.
+        quota_limit = {'volumes': 7}
+        body = {'quota_set': quota_limit}
+        self.controller.update(self.req, self.A.id, body)
+
+        quota_limit['volumes'] = 4
+        self.controller.update(self.req, self.B.id, body)
+        quota_limit['volumes'] = -1
+        self.controller.update(self.req, self.D.id, body)
+
+        quota_limit['volumes'] = 1
+        self.controller.update(self.req, self.C.id, body)
+
+        self.req.params['fix_allocated_quotas'] = True
+        self.controller.validate_setup_for_nested_quota_use(self.req)
+
+        # Validate that the allocated values look right for each project
+        self.req.params = {'usage': 'True'}
+
+        res = 'volumes'
+        # A has given 4 vols to B and 1 vol to C (from limits)
+        self._assert_quota_show(self.A.id, res, allocated=5, in_use=1, limit=7)
+        self._assert_quota_show(self.B.id, res, allocated=2, in_use=1, limit=4)
+        self._assert_quota_show(self.D.id, res, in_use=2, limit=-1)
+        self._assert_quota_show(self.C.id, res, limit=1)
+
+        # Update B to -1 limit, and make sure that A's allocated gets updated
+        # with B + D's in_use values (one less than current limit
+        quota_limit['volumes'] = -1
+        self.controller.update(self.req, self.B.id, body)
+        self._assert_quota_show(self.A.id, res, allocated=4, in_use=1, limit=7)
+
+        quota_limit['volumes'] = 6
+        self.assertRaises(
+            webob.exc.HTTPBadRequest,
+            self.controller.update, self.req, self.B.id, body)
+
+        quota_limit['volumes'] = 5
+        self.controller.update(self.req, self.B.id, body)
+        self._assert_quota_show(self.A.id, res, allocated=6, in_use=1, limit=7)
+
 
 class QuotaSerializerTest(test.TestCase):
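The reserve/rollback cases above depend on the "allocated" reservations this change introduces: when a project whose limit is -1 consumes quota, the consumption is also booked against the nearest hard-limited ancestor's allocated counter, and the reservation remembers that booking so a failed volume create can undo it. The toy ledger below sketches that bookkeeping in outline only -- AllocBook and its methods are invented names, not the cinder.db.sqlalchemy layer.

class AllocBook(object):
    """Toy ledger for parent 'allocated' bookkeeping (illustration only)."""

    def __init__(self):
        self.allocated = {}     # project_id -> quota handed to children
        self.reservations = {}  # reservation id -> (parent_id, delta)
        self._next_id = 0

    def reserve(self, parent_id, delta):
        # Book the child's consumption against the parent at reserve time,
        # so a failed create can be rolled back symmetrically.
        self.allocated[parent_id] = self.allocated.get(parent_id, 0) + delta
        self._next_id += 1
        self.reservations[self._next_id] = (parent_id, delta)
        return self._next_id

    def rollback(self, reservation_id):
        parent_id, delta = self.reservations.pop(reservation_id)
        self.allocated[parent_id] -= delta


book = AllocBook()
res = book.reserve('A', 2)   # child with a -1 limit reserves 2 volumes
assert book.allocated['A'] == 2
book.rollback(res)           # the volume create failed
assert book.allocated['A'] == 0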
@@ -739,6 +739,11 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(services.c.active_backend_id.type,
                               self.VARCHAR_TYPE)
 
+    def _check_066(self, engine, data):
+        reservations = db_utils.get_table(engine, 'reservations')
+        self.assertIsInstance(reservations.c.allocated_id.type,
+                              self.INTEGER_TYPE)
+
     def test_walk_versions(self):
         self.walk_versions(False, False)
 
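_check_066 only verifies that the migration leaves an integer allocated_id column on the reservations table; the migration script itself is not part of this excerpt. Purely as a hedged illustration of the kind of schema change being checked (generic SQLAlchemy DDL, not Cinder's actual 066 migration):

import sqlalchemy as sa


def add_allocated_id_column(engine):
    """Illustrative only: add the column with plain DDL."""
    with engine.begin() as conn:
        conn.execute(sa.text(
            "ALTER TABLE reservations ADD COLUMN allocated_id INTEGER"))


engine = sa.create_engine("sqlite://")
with engine.begin() as conn:
    conn.execute(sa.text("CREATE TABLE reservations (id INTEGER PRIMARY KEY)"))
add_allocated_id_column(engine)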
@@ -15,7 +15,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-
 import datetime
 
 import mock
@@ -1387,8 +1386,10 @@ class NestedDbQuotaDriverBaseTestCase(DbQuotaDriverBaseTestCase):
         class FakeProject(object):
             def __init__(self, parent_id):
                 self.parent_id = parent_id
+                self.parents = {parent_id: None}
 
-        def fake_get_project(project_id, subtree_as_ids=False):
+        def fake_get_project(project_id, subtree_as_ids=False,
+                             parents_as_ids=False):
             # Enable imitation of projects with and without parents
             if project_id == self._child_proj_id:
                 return FakeProject('parent_id')
@@ -1572,28 +1573,63 @@ class NestedQuotaValidation(NestedDbQuotaDriverBaseTestCase):
                           self.resources, self.project_tree)
         self.proj_vals['D']['limit'] = 2
 
-    def test_validate_nested_quotas_negative_child_limit(self):
-        self.proj_vals['B']['limit'] = -1
-        self.assertRaises(
-            exception.InvalidNestedQuotaSetup,
-            self.driver.validate_nested_setup,
-            self.context, self.resources, self.project_tree)
-
     def test_validate_nested_quotas_usage_over_limit(self):
         self.proj_vals['D']['in_use'] = 5
         self.assertRaises(exception.InvalidNestedQuotaSetup,
                           self.driver.validate_nested_setup,
                           self.context, self.resources, self.project_tree)
 
     def test_validate_nested_quota_bad_allocated_quotas(self):
         self.proj_vals['A']['alloc'] = 5
         self.proj_vals['B']['alloc'] = 8
         self.assertRaises(exception.InvalidNestedQuotaSetup,
                           self.driver.validate_nested_setup,
                           self.context, self.resources, self.project_tree)
 
+    def test_validate_nested_quota_negative_child_limits(self):
+        # Redefining the project limits with -1, doing it all in this test
+        # for readability
+        self.proj_vals = {
+            'A': {'limit': 8, 'in_use': 1},
+            'B': {'limit': -1, 'in_use': 3},
+            'D': {'limit': 4, 'in_use': 0},
+            'C': {'limit': 2, 'in_use': 2},
+        }
+
+        # A's child usage is 3 (from B) + 4 (from D) + 2 (from C) = 9
+        self.assertRaises(exception.InvalidNestedQuotaSetup,
+                          self.driver.validate_nested_setup,
+                          self.context, self.resources, self.project_tree)
+
+        self.proj_vals['D']['limit'] = 2
+        self.driver.validate_nested_setup(
+            self.context, self.resources, self.project_tree,
+            fix_allocated_quotas=True)
+
+    def test_get_cur_project_allocated(self):
+        # Redefining the project limits with -1, doing it all in this test
+        # for readability
+        self.proj_vals = {
+            # Allocated are here to simulate a bad existing value
+            'A': {'limit': 8, 'in_use': 1, 'alloc': 6},
+            'B': {'limit': -1, 'in_use': 3, 'alloc': 2},
+            'D': {'limit': 1, 'in_use': 0},
+            'C': {'limit': 2, 'in_use': 2},
+        }
+
+        self.driver._allocated = {}
+        allocated_a = self.driver._get_cur_project_allocated(
+            self.context, self.resources['volumes'],
+            self.project_tree)
+
+        # A's allocated will be:
+        # 2 (from C's limit) + 3 (from B's in-use) + 1 (from D's limit) = 6
+        self.assertEqual(6, allocated_a)
+
+        # B's allocated value should also be calculated and cached as part
+        # of A's calculation
+        self.assertEqual(1, self.driver._allocated['B']['volumes'])
+
 
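The new test_get_cur_project_allocated case above encodes the rule for computing a parent's allocated total: a child with a hard limit contributes its full limit, while a child with a -1 limit contributes its own usage plus, recursively, whatever it has handed out further down the tree. A standalone sketch of that recursion (the function and data shapes here are invented for illustration and are not the driver's _get_cur_project_allocated):

def cur_allocated(children, limits, in_use):
    """Quota a project has effectively handed out to the given subtree.

    `children` maps child id -> its own child subtree (or None), mirroring
    the project_tree shape used by the tests above.
    """
    total = 0
    for child_id, grandchildren in (children or {}).items():
        if limits[child_id] != -1:
            # A hard-limited child counts for its full limit.
            total += limits[child_id]
        else:
            # A -1 child counts for its own usage plus whatever it has
            # handed out further down the tree.
            total += in_use[child_id] + cur_allocated(
                grandchildren, limits, in_use)
    return total


# Mirrors the test: A's children are B (limit -1, using 3, with child D at
# limit 1) and C (limit 2), so A's allocated is 2 + 3 + 1 = 6.
tree = {'B': {'D': None}, 'C': None}
limits = {'B': -1, 'C': 2, 'D': 1}
in_use = {'B': 3, 'C': 2, 'D': 0}
assert cur_allocated(tree, limits, in_use) == 6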
 class FakeSession(object):
     def begin(self):
@@ -1605,6 +1641,9 @@ class FakeSession(object):
     def __exit__(self, exc_type, exc_value, exc_traceback):
         return False
 
+    def query(self, *args, **kwargs):
+        pass
+
 
 class FakeUsage(sqa_models.QuotaUsage):
     def save(self, *args, **kwargs):
@@ -1665,15 +1704,22 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
             return quota_usage_ref
 
         def fake_reservation_create(context, uuid, usage_id, project_id,
-                                    resource, delta, expire, session=None):
+                                    resource, delta, expire, session=None,
+                                    allocated_id=None):
             reservation_ref = self._make_reservation(
                 uuid, usage_id, project_id, resource, delta, expire,
-                timeutils.utcnow(), timeutils.utcnow())
+                timeutils.utcnow(), timeutils.utcnow(), allocated_id)
 
             self.reservations_created[resource] = reservation_ref
 
             return reservation_ref
 
+        def fake_qagabp(context, project_id):
+            self.assertEqual('test_project', project_id)
+            return {'project_id': project_id}
+
+        self.stubs.Set(sqa_api, 'quota_allocated_get_all_by_project',
+                       fake_qagabp)
         self.stubs.Set(sqa_api, 'get_session', fake_get_session)
         self.stubs.Set(sqa_api, '_get_quota_usages', fake_get_quota_usages)
         self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create)
@@ -1723,7 +1769,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
                              (actual, value, resource))
 
     def _make_reservation(self, uuid, usage_id, project_id, resource,
-                          delta, expire, created_at, updated_at):
+                          delta, expire, created_at, updated_at, alloc_id):
         reservation_ref = sqa_models.Reservation()
         reservation_ref.id = len(self.reservations_created)
         reservation_ref.uuid = uuid
@@ -1736,6 +1782,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
         reservation_ref.updated_at = updated_at
         reservation_ref.deleted_at = None
         reservation_ref.deleted = False
+        reservation_ref.allocated_id = alloc_id
 
         return reservation_ref
 
@@ -89,7 +89,7 @@ class QuotaUtilsTest(test.TestCase):
         project = quota_utils.get_project_hierarchy(
             self.context, self.context.project_id, subtree_as_ids=True)
         keystoneclient.projects.get.assert_called_once_with(
-            self.context.project_id, subtree_as_ids=True)
+            self.context.project_id, parents_as_ids=False, subtree_as_ids=True)
         self.assertEqual(expected_project.__dict__, project.__dict__)
 
     @mock.patch('cinder.quota_utils._keystone_client')
@@ -605,7 +605,9 @@ class QuotaReserveTask(flow_utils.CinderTask):
             usages = e.kwargs['usages']
 
             def _consumed(name):
-                return usages[name]['reserved'] + usages[name]['in_use']
+                usage = usages[name]
+                return usage['reserved'] + usage['in_use'] + usage.get(
+                    'allocated', 0)
 
             def _get_over(name):
                 for over in overs:
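The _consumed tweak above keeps the error-path arithmetic consistent with the new accounting: what a project has already consumed is its in-use plus reserved usage plus anything it has allocated to children, with 'allocated' treated as optional since flat (non-nested) drivers do not report it. A minimal standalone equivalent, for illustration only:

def consumed(usage):
    # 'allocated' may be missing when the flat quota driver is in use.
    return usage['reserved'] + usage['in_use'] + usage.get('allocated', 0)


assert consumed({'reserved': 1, 'in_use': 2}) == 3
assert consumed({'reserved': 1, 'in_use': 2, 'allocated': 4}) == 7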
@@ -616,6 +618,7 @@
             over_name = _get_over('gigabytes')
             exceeded_vol_limit_name = _get_over('volumes')
             if over_name:
+                # TODO(mc_nair): improve error message for child -1 limit
                 msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                           "%(s_size)sG volume (%(d_consumed)dG "
                           "of %(d_quota)dG already consumed)")
@@ -637,6 +640,7 @@
                           's_pid': context.project_id,
                           'd_consumed':
                               _consumed(exceeded_vol_limit_name)})
+                # TODO(mc_nair): improve error message for child -1 limit
                 raise exception.VolumeLimitExceeded(
                     allowed=quotas[exceeded_vol_limit_name],
                     name=exceeded_vol_limit_name)