Merge "Schedule the request to scheduler when creating from snapshot/volume"
commit af688d3b4b
@@ -137,6 +137,7 @@ OBJ_VERSIONS.add('1.26', {'Snapshot': '1.5'})
 OBJ_VERSIONS.add('1.27', {'Backup': '1.5', 'BackupImport': '1.5'})
 OBJ_VERSIONS.add('1.28', {'Service': '1.5'})
 OBJ_VERSIONS.add('1.29', {'Service': '1.6'})
+OBJ_VERSIONS.add('1.30', {'RequestSpec': '1.2'})


 class CinderObjectRegistry(base.VersionedObjectRegistry):
@@ -23,7 +23,8 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
                   base.CinderComparableObject):
     # Version 1.0: Initial version
     # Version 1.1: Added group_id and group_backend
-    VERSION = '1.1'
+    # Version 1.2 Added ``resource_backend``
+    VERSION = '1.2'

     fields = {
         'consistencygroup_id': fields.UUIDField(nullable=True),
@@ -40,6 +41,7 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
                                            nullable=True),
         'CG_backend': fields.StringField(nullable=True),
         'group_backend': fields.StringField(nullable=True),
+        'resource_backend': fields.StringField(nullable=True)
     }

     obj_extra_fields = ['resource_properties']
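The two hunks above bump RequestSpec from version 1.1 to 1.2, register that version under object history 1.30, and add a nullable `resource_backend` string field alongside the existing `group_backend`. For readers less familiar with oslo.versionedobjects, a minimal standalone sketch of the shape this gives the object (illustrative only; the class name and abridged field list below are not the real cinder module):

    from oslo_versionedobjects import base as ovo_base
    from oslo_versionedobjects import fields as ovo_fields


    @ovo_base.VersionedObjectRegistry.register
    class RequestSpecSketch(ovo_base.VersionedObject):
        """Abridged stand-in for cinder.objects.request_spec.RequestSpec."""
        # Version 1.1: Added group_id and group_backend
        # Version 1.2: Added resource_backend
        VERSION = '1.2'

        fields = {
            'group_backend': ovo_fields.StringField(nullable=True),
            # New in 1.2: the backend the related resource (CG, group,
            # snapshot or source volume) already lives on.
            'resource_backend': ovo_fields.StringField(nullable=True),
        }

Because the field is nullable and left unset by default, requests that carry no placement constraint are scheduled exactly as before.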
@@ -508,14 +508,15 @@ class FilterScheduler(driver.Scheduler):
         weighed_backends = self._get_weighted_candidates(context, request_spec,
                                                          filter_properties)
         # When we get the weighed_backends, we clear those backends that don't
-        # match the group's backend.
-        group_backend = request_spec.get('group_backend')
-        if weighed_backends and group_backend:
+        # match the resource's backend (it could be assigned from group,
+        # snapshot or volume).
+        resource_backend = request_spec.get('resource_backend')
+        if weighed_backends and resource_backend:
             # Get host name including host@backend#pool info from
             # weighed_backends.
             for backend in weighed_backends[::-1]:
                 backend_id = utils.extract_host(backend.obj.backend_id)
-                if backend_id != group_backend:
+                if backend_id != resource_backend:
                     weighed_backends.remove(backend)
         if not weighed_backends:
             LOG.warning('No weighed backend found for volume '
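The scheduler-side change generalizes what was previously a group-only restriction: whenever the request spec carries a `resource_backend`, every weighed candidate whose host@backend part differs is dropped before the winner is picked. A self-contained sketch of that pruning, with plain strings standing in for the weighed-backend objects and a simplified stand-in for `extract_host`:

    def _backend_of(host):
        # Simplified stand-in for cinder.volume.utils.extract_host(host):
        # 'host@backend#pool' -> 'host@backend'.
        return host.split('#')[0]


    def prune_candidates(weighed_backend_ids, resource_backend):
        """Keep only candidates living on the requested backend."""
        if not resource_backend:
            return weighed_backend_ids
        return [b for b in weighed_backend_ids
                if _backend_of(b) == resource_backend]


    candidates = ['host1@lvm#pool1', 'host2@lvm#pool1', 'host1@lvm#pool2']
    print(prune_candidates(candidates, 'host1@lvm'))
    # ['host1@lvm#pool1', 'host1@lvm#pool2'] -- host2 is filtered out

If nothing survives the pruning, the request fails with the "No weighed backend found" warning seen in the context above, which is what the `'resource_backend': 'host_none'` test below relies on.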
@@ -42,7 +42,7 @@ object_data = {
     'ManageableVolumeList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'QualityOfServiceSpecs': '1.0-0b212e0a86ee99092229874e03207fe8',
     'QualityOfServiceSpecsList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
-    'RequestSpec': '1.1-b0bd1a28d191d75648901fa853e8a733',
+    'RequestSpec': '1.2-207502df46a50575a818076e1ea119db',
     'Service': '1.6-e881b6b324151dd861e09cdfffcdaccd',
     'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'Snapshot': '1.5-ac1cdbd5b89588f6a8f44afdf6b8b201',
@@ -185,10 +185,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         self.assertTrue(_mock_service_get_all.called)

     @mock.patch('cinder.db.service_get_all')
-    def test_create_volume_clear_host_different_with_group(
+    def test_create_volume_host_different_with_resource_backend(
             self, _mock_service_get_all):
-        # Ensure we clear those hosts whose backend is not same as
-        # group's backend.
         sched = fakes.FakeFilterScheduler()
         sched.host_manager = fakes.FakeHostManager()
         fakes.mock_host_manager_db_calls(_mock_service_get_all)
@@ -196,12 +194,12 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         request_spec = {'volume_properties': {'project_id': 1,
                                               'size': 1},
                         'volume_type': {'name': 'LVM_iSCSI'},
-                        'group_backend': 'host@lvmdriver'}
+                        'resource_backend': 'host_none'}
         weighed_host = sched._schedule(fake_context, request_spec, {})
         self.assertIsNone(weighed_host)

     @mock.patch('cinder.db.service_get_all')
-    def test_create_volume_host_same_as_group(self, _mock_service_get_all):
+    def test_create_volume_host_same_as_resource(self, _mock_service_get_all):
         # Ensure we don't clear the host whose backend is same as
         # group's backend.
         sched = fakes.FakeFilterScheduler()
@@ -211,7 +209,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         request_spec = {'volume_properties': {'project_id': 1,
                                               'size': 1},
                         'volume_type': {'name': 'LVM_iSCSI'},
-                        'group_backend': 'host1'}
+                        'resource_backend': 'host1'}
         weighed_host = sched._schedule(fake_context, request_spec, {})
         self.assertEqual('host1#lvm1', weighed_host.obj.host)

@@ -56,6 +56,60 @@ class CreateVolumeFlowTestCase(test.TestCase):
             'cinder.volume.volume_types.get_volume_type_extra_specs',
             return_value={})

+    @mock.patch('cinder.objects.Volume.get_by_id')
+    @mock.patch('cinder.volume.utils.extract_host')
+    @mock.patch('time.time')
+    @mock.patch('cinder.objects.Snapshot.get_by_id')
+    def test_cast_create_volume_from_resource(self, mock_snapshot_get,
+                                              mock_time, mock_extract_host,
+                                              volume_get_by_id):
+        mock_time.side_effect = self.time_inc
+        volume = fake_volume.fake_volume_obj(self.ctxt,
+                                             host='host@backend#pool')
+        volume_get_by_id.return_value = volume
+
+        spec = {'volume_id': volume.id,
+                'volume': volume,
+                'source_volid': volume.id,
+                'snapshot_id': None,
+                'image_id': 4,
+                'consistencygroup_id': None,
+                'cgsnapshot_id': None,
+                'group_id': None, }
+
+        # Fake objects assert specs
+        task = create_volume.VolumeCastTask(
+            fake_volume_api.FakeSchedulerRpcAPI(spec, self),
+            fake_volume_api.FakeVolumeAPI(spec, self),
+            fake_volume_api.FakeDb())
+
+        task._cast_create_volume(self.ctxt, spec, {})
+        mock_extract_host.assert_called_once_with('host@backend#pool')
+
+        snapshot = fake_snapshot.fake_snapshot_obj(self.ctxt,
+                                                   volume=volume)
+        mock_snapshot_get.return_value = snapshot
+
+        spec = {'volume_id': volume.id,
+                'volume': volume,
+                'source_volid': None,
+                'snapshot_id': snapshot.id,
+                'image_id': 4,
+                'consistencygroup_id': None,
+                'cgsnapshot_id': None,
+                'group_id': None, }
+
+        # Fake objects assert specs
+        task = create_volume.VolumeCastTask(
+            fake_volume_api.FakeSchedulerRpcAPI(spec, self),
+            fake_volume_api.FakeVolumeAPI(spec, self),
+            fake_volume_api.FakeDb())
+
+        task._cast_create_volume(self.ctxt, spec, {})
+        mock_snapshot_get.assert_called_once_with(self.ctxt, snapshot.id)
+        mock_extract_host.assert_has_calls([mock.call('host@backend#pool'),
+                                            mock.call('host@backend#pool')])
+
     @mock.patch('cinder.objects.Volume.get_by_id')
     @mock.patch('cinder.volume.utils.extract_host')
     @mock.patch('time.time')
@@ -1437,7 +1437,10 @@ class VolumeTestCase(base.BaseVolumeTestCase):
                                        'description',
                                        volume_type=db_vol_type)

-        volume_src['host'] = 'fake_host'
+        db.volume_update(self.context, volume_src['id'],
+                         {'host': 'fake_host@fake_backend'})
+        volume_src = objects.Volume.get_by_id(self.context, volume_src['id'])
+
         snapshot_ref = volume_api.create_snapshot_force(self.context,
                                                         volume_src,
                                                         'name',
@@ -1494,7 +1497,10 @@ class VolumeTestCase(base.BaseVolumeTestCase):
                                        'name',
                                        'description',
                                        volume_type=db_vol_type)
-        volume_src['status'] = 'available' # status must be available
+        db.volume_update(self.context, volume_src['id'],
+                         {'host': 'fake_host@fake_backend',
+                          'status': 'available'})
+        volume_src = objects.Volume.get_by_id(self.context, volume_src['id'])
         volume_dst = volume_api.create(self.context,
                                        1,
                                        'name',
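The two test fixtures above switch from a bare 'fake_host' to 'fake_host@fake_backend' (written through db.volume_update and re-read as a versioned object) because the source volume's host string is now passed through vol_utils.extract_host() to populate `resource_backend`. Roughly, extract_host behaves like the simplified sketch below at the two levels relevant here (an approximation, not the real implementation, which also handles pool names and defaults):

    def extract_host_sketch(host, level='backend'):
        # Approximation of cinder.volume.utils.extract_host().
        if host is None:
            return None
        hostname = host.partition('#')[0]        # drop any '#pool' suffix
        if level == 'host':
            return hostname.partition('@')[0]    # 'host@backend' -> 'host'
        return hostname                          # default level: 'host@backend'


    print(extract_host_sketch('fake_host@fake_backend'))     # fake_host@fake_backend
    print(extract_host_sketch('host@backend#pool'))          # host@backend
    print(extract_host_sketch('host@backend#pool', 'host'))  # host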
@@ -14,7 +14,6 @@
 from castellan.common import exception as castellan_exc
 from oslo_config import cfg
 from oslo_log import log as logging
-from oslo_utils import timeutils
 from oslo_utils import units
 import six
 import taskflow.engines
@@ -728,28 +727,24 @@ class VolumeCastTask(flow_utils.CinderTask):
         self.db = db

     def _cast_create_volume(self, context, request_spec, filter_properties):
-        source_volume_ref = None
         source_volid = request_spec['source_volid']
         volume = request_spec['volume']
         snapshot_id = request_spec['snapshot_id']
         image_id = request_spec['image_id']
         cgroup_id = request_spec['consistencygroup_id']
-        cgsnapshot_id = request_spec['cgsnapshot_id']
         group_id = request_spec['group_id']
         if cgroup_id:
             # If cgroup_id existed, we should cast volume to the scheduler
             # to choose a proper pool whose backend is same as CG's backend.
             cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id)
-            request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host)
+            request_spec['resource_backend'] = vol_utils.extract_host(
+                cgroup.host)
         elif group_id:
             # If group_id exists, we should cast volume to the scheduler
             # to choose a proper pool whose backend is same as group's backend.
             group = objects.Group.get_by_id(context, group_id)
-            # FIXME(wanghao): group_backend got added before request_spec was
-            # converted to versioned objects. We should make sure that this
-            # will be handled by object version translations once we add
-            # RequestSpec object.
-            request_spec['group_backend'] = vol_utils.extract_host(group.host)
+            request_spec['resource_backend'] = vol_utils.extract_host(
+                group.host)
         elif snapshot_id and CONF.snapshot_same_host:
             # NOTE(Rongze Zhu): A simple solution for bug 1008866.
             #
@@ -757,35 +752,24 @@ class VolumeCastTask(flow_utils.CinderTask):
             # the call create volume directly to the volume host where the
             # snapshot resides instead of passing it through the scheduler, so
             # snapshot can be copied to the new volume.
+            # NOTE(tommylikehu): In order to check the backend's capacity
+            # before creating volume, we schedule this request to scheduler
+            # service with the desired backend information.
             snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
-            source_volume_ref = snapshot.volume
+            request_spec['resource_backend'] = vol_utils.extract_host(
+                snapshot.volume.host)
         elif source_volid:
             source_volume_ref = objects.Volume.get_by_id(context, source_volid)
+            request_spec['resource_backend'] = vol_utils.extract_host(
+                source_volume_ref.host)

-        if not source_volume_ref:
-            # Cast to the scheduler and let it handle whatever is needed
-            # to select the target host for this volume.
-            self.scheduler_rpcapi.create_volume(
-                context,
-                volume,
-                snapshot_id=snapshot_id,
-                image_id=image_id,
-                request_spec=request_spec,
-                filter_properties=filter_properties)
-        else:
-            # Bypass the scheduler and send the request directly to the volume
-            # manager.
-            volume.host = source_volume_ref.host
-            volume.cluster_name = source_volume_ref.cluster_name
-            volume.scheduled_at = timeutils.utcnow()
-            volume.save()
-            if not cgsnapshot_id:
-                self.volume_rpcapi.create_volume(
-                    context,
-                    volume,
-                    request_spec,
-                    filter_properties,
-                    allow_reschedule=False)
+        self.scheduler_rpcapi.create_volume(
+            context,
+            volume,
+            snapshot_id=snapshot_id,
+            image_id=image_id,
+            request_spec=request_spec,
+            filter_properties=filter_properties)

     def execute(self, context, **kwargs):
         scheduler_hints = kwargs.pop('scheduler_hints', None)
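Taken together, the last two hunks make _cast_create_volume stop bypassing the scheduler for snapshot- and volume-sourced creates: instead of copying the source host onto the new volume and calling the volume manager directly, the task records the source's backend in request_spec['resource_backend'] and always casts to the scheduler, which can then verify the backend's capacity before placing the volume. A condensed standalone sketch of the resulting decision flow (hypothetical helper names, a plain-dict request spec, and the CONF.snapshot_same_host gate on the snapshot branch omitted for brevity):

    def backend_of(host):
        return host.split('#')[0]      # 'host@backend#pool' -> 'host@backend'


    def resource_backend_for(request_spec, host_of):
        """Return the backend constraint for the new volume, if any.

        `host_of` stands in for the ConsistencyGroup/Group/Snapshot/Volume
        get_by_id() lookups done in the real task; it maps an id to that
        resource's host string.
        """
        for key in ('consistencygroup_id', 'group_id', 'snapshot_id',
                    'source_volid'):
            if request_spec.get(key):
                return backend_of(host_of(request_spec[key]))
        return None


    spec = {'source_volid': 'vol-1', 'snapshot_id': None,
            'consistencygroup_id': None, 'group_id': None}
    backend = resource_backend_for(spec, host_of=lambda _id: 'host1@lvm#pool1')
    if backend:
        spec['resource_backend'] = backend
    # The request is then always handed to the scheduler RPC API, never
    # directly to the volume manager.
    print(spec['resource_backend'])    # host1@lvm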