Merge "Schedule the request to scheduler when creating from snapshot/volume"
commit af688d3b4b
@@ -137,6 +137,7 @@ OBJ_VERSIONS.add('1.26', {'Snapshot': '1.5'})
 OBJ_VERSIONS.add('1.27', {'Backup': '1.5', 'BackupImport': '1.5'})
 OBJ_VERSIONS.add('1.28', {'Service': '1.5'})
 OBJ_VERSIONS.add('1.29', {'Service': '1.6'})
+OBJ_VERSIONS.add('1.30', {'RequestSpec': '1.2'})
 
 
 class CinderObjectRegistry(base.VersionedObjectRegistry):
@@ -23,7 +23,8 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
                   base.CinderComparableObject):
     # Version 1.0: Initial version
     # Version 1.1: Added group_id and group_backend
-    VERSION = '1.1'
+    # Version 1.2 Added ``resource_backend``
+    VERSION = '1.2'
 
     fields = {
         'consistencygroup_id': fields.UUIDField(nullable=True),
@@ -40,6 +41,7 @@ class RequestSpec(base.CinderObject, base.CinderObjectDictCompat,
                                          nullable=True),
         'CG_backend': fields.StringField(nullable=True),
         'group_backend': fields.StringField(nullable=True),
+        'resource_backend': fields.StringField(nullable=True)
     }
 
     obj_extra_fields = ['resource_properties']
@@ -508,14 +508,15 @@ class FilterScheduler(driver.Scheduler):
         weighed_backends = self._get_weighted_candidates(context, request_spec,
                                                          filter_properties)
         # When we get the weighed_backends, we clear those backends that don't
-        # match the group's backend.
-        group_backend = request_spec.get('group_backend')
-        if weighed_backends and group_backend:
+        # match the resource's backend (it could be assigned from group,
+        # snapshot or volume).
+        resource_backend = request_spec.get('resource_backend')
+        if weighed_backends and resource_backend:
             # Get host name including host@backend#pool info from
             # weighed_backends.
             for backend in weighed_backends[::-1]:
                 backend_id = utils.extract_host(backend.obj.backend_id)
-                if backend_id != group_backend:
+                if backend_id != resource_backend:
                     weighed_backends.remove(backend)
         if not weighed_backends:
             LOG.warning('No weighed backend found for volume '
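
The scheduler-side change above is the heart of the fix: once the request spec carries a resource_backend hint, every weighed candidate whose host@backend prefix differs is dropped before a winner is picked. A minimal standalone sketch of that pruning step, in plain Python; WeighedBackend and extract_host here are simplified stand-ins for illustration, not Cinder's real classes:

from collections import namedtuple

# Simplified stand-ins for illustration only.
WeighedBackend = namedtuple('WeighedBackend', ['backend_id', 'weight'])


def extract_host(host):
    # Reduce 'host@backend#pool' to 'host@backend'.
    return host.partition('#')[0]


def prune_by_resource_backend(weighed_backends, request_spec):
    resource_backend = request_spec.get('resource_backend')
    if weighed_backends and resource_backend:
        # Walk a reversed copy so removing entries is safe mid-loop.
        for backend in weighed_backends[::-1]:
            if extract_host(backend.backend_id) != resource_backend:
                weighed_backends.remove(backend)
    return weighed_backends


candidates = [WeighedBackend('host1@lvm#pool1', 10.0),
              WeighedBackend('host2@ceph#pool1', 20.0)]
print(prune_by_resource_backend(candidates, {'resource_backend': 'host1@lvm'}))
# -> only the host1@lvm candidate remains
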
@@ -42,7 +42,7 @@ object_data = {
     'ManageableVolumeList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'QualityOfServiceSpecs': '1.0-0b212e0a86ee99092229874e03207fe8',
     'QualityOfServiceSpecsList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
-    'RequestSpec': '1.1-b0bd1a28d191d75648901fa853e8a733',
+    'RequestSpec': '1.2-207502df46a50575a818076e1ea119db',
     'Service': '1.6-e881b6b324151dd861e09cdfffcdaccd',
     'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'Snapshot': '1.5-ac1cdbd5b89588f6a8f44afdf6b8b201',
@@ -185,10 +185,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         self.assertTrue(_mock_service_get_all.called)
 
     @mock.patch('cinder.db.service_get_all')
-    def test_create_volume_clear_host_different_with_group(
+    def test_create_volume_host_different_with_resource_backend(
             self, _mock_service_get_all):
-        # Ensure we clear those hosts whose backend is not same as
-        # group's backend.
         sched = fakes.FakeFilterScheduler()
         sched.host_manager = fakes.FakeHostManager()
         fakes.mock_host_manager_db_calls(_mock_service_get_all)
@@ -196,12 +194,12 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         request_spec = {'volume_properties': {'project_id': 1,
                                               'size': 1},
                        'volume_type': {'name': 'LVM_iSCSI'},
-                       'group_backend': 'host@lvmdriver'}
+                       'resource_backend': 'host_none'}
         weighed_host = sched._schedule(fake_context, request_spec, {})
         self.assertIsNone(weighed_host)
 
     @mock.patch('cinder.db.service_get_all')
-    def test_create_volume_host_same_as_group(self, _mock_service_get_all):
+    def test_create_volume_host_same_as_resource(self, _mock_service_get_all):
         # Ensure we don't clear the host whose backend is same as
         # group's backend.
         sched = fakes.FakeFilterScheduler()
@@ -211,7 +209,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         request_spec = {'volume_properties': {'project_id': 1,
                                               'size': 1},
                        'volume_type': {'name': 'LVM_iSCSI'},
-                       'group_backend': 'host1'}
+                       'resource_backend': 'host1'}
         weighed_host = sched._schedule(fake_context, request_spec, {})
         self.assertEqual('host1#lvm1', weighed_host.obj.host)
 
@@ -56,6 +56,60 @@ class CreateVolumeFlowTestCase(test.TestCase):
             'cinder.volume.volume_types.get_volume_type_extra_specs',
             return_value={})
 
+    @mock.patch('cinder.objects.Volume.get_by_id')
+    @mock.patch('cinder.volume.utils.extract_host')
+    @mock.patch('time.time')
+    @mock.patch('cinder.objects.Snapshot.get_by_id')
+    def test_cast_create_volume_from_resource(self, mock_snapshot_get,
+                                              mock_time, mock_extract_host,
+                                              volume_get_by_id):
+        mock_time.side_effect = self.time_inc
+        volume = fake_volume.fake_volume_obj(self.ctxt,
+                                             host='host@backend#pool')
+        volume_get_by_id.return_value = volume
+
+        spec = {'volume_id': volume.id,
+                'volume': volume,
+                'source_volid': volume.id,
+                'snapshot_id': None,
+                'image_id': 4,
+                'consistencygroup_id': None,
+                'cgsnapshot_id': None,
+                'group_id': None, }
+
+        # Fake objects assert specs
+        task = create_volume.VolumeCastTask(
+            fake_volume_api.FakeSchedulerRpcAPI(spec, self),
+            fake_volume_api.FakeVolumeAPI(spec, self),
+            fake_volume_api.FakeDb())
+
+        task._cast_create_volume(self.ctxt, spec, {})
+        mock_extract_host.assert_called_once_with('host@backend#pool')
+
+        snapshot = fake_snapshot.fake_snapshot_obj(self.ctxt,
+                                                   volume=volume)
+        mock_snapshot_get.return_value = snapshot
+
+        spec = {'volume_id': volume.id,
+                'volume': volume,
+                'source_volid': None,
+                'snapshot_id': snapshot.id,
+                'image_id': 4,
+                'consistencygroup_id': None,
+                'cgsnapshot_id': None,
+                'group_id': None, }
+
+        # Fake objects assert specs
+        task = create_volume.VolumeCastTask(
+            fake_volume_api.FakeSchedulerRpcAPI(spec, self),
+            fake_volume_api.FakeVolumeAPI(spec, self),
+            fake_volume_api.FakeDb())
+
+        task._cast_create_volume(self.ctxt, spec, {})
+        mock_snapshot_get.assert_called_once_with(self.ctxt, snapshot.id)
+        mock_extract_host.assert_has_calls([mock.call('host@backend#pool'),
+                                            mock.call('host@backend#pool')])
+
     @mock.patch('cinder.objects.Volume.get_by_id')
     @mock.patch('cinder.volume.utils.extract_host')
     @mock.patch('time.time')
@@ -1437,7 +1437,10 @@ class VolumeTestCase(base.BaseVolumeTestCase):
                                        'description',
                                        volume_type=db_vol_type)
 
-        volume_src['host'] = 'fake_host'
+        db.volume_update(self.context, volume_src['id'],
+                         {'host': 'fake_host@fake_backend'})
+        volume_src = objects.Volume.get_by_id(self.context, volume_src['id'])
+
         snapshot_ref = volume_api.create_snapshot_force(self.context,
                                                         volume_src,
                                                         'name',
@@ -1494,7 +1497,10 @@ class VolumeTestCase(base.BaseVolumeTestCase):
                                        'name',
                                        'description',
                                        volume_type=db_vol_type)
-        volume_src['status'] = 'available'  # status must be available
+        db.volume_update(self.context, volume_src['id'],
+                         {'host': 'fake_host@fake_backend',
+                          'status': 'available'})
+        volume_src = objects.Volume.get_by_id(self.context, volume_src['id'])
         volume_dst = volume_api.create(self.context,
                                        1,
                                        'name',
@@ -14,7 +14,6 @@
 from castellan.common import exception as castellan_exc
 from oslo_config import cfg
 from oslo_log import log as logging
-from oslo_utils import timeutils
 from oslo_utils import units
 import six
 import taskflow.engines
@@ -728,28 +727,24 @@ class VolumeCastTask(flow_utils.CinderTask):
         self.db = db
 
     def _cast_create_volume(self, context, request_spec, filter_properties):
-        source_volume_ref = None
         source_volid = request_spec['source_volid']
         volume = request_spec['volume']
         snapshot_id = request_spec['snapshot_id']
         image_id = request_spec['image_id']
         cgroup_id = request_spec['consistencygroup_id']
-        cgsnapshot_id = request_spec['cgsnapshot_id']
         group_id = request_spec['group_id']
         if cgroup_id:
             # If cgroup_id existed, we should cast volume to the scheduler
             # to choose a proper pool whose backend is same as CG's backend.
             cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id)
-            request_spec['CG_backend'] = vol_utils.extract_host(cgroup.host)
+            request_spec['resource_backend'] = vol_utils.extract_host(
+                cgroup.host)
         elif group_id:
             # If group_id exists, we should cast volume to the scheduler
             # to choose a proper pool whose backend is same as group's backend.
             group = objects.Group.get_by_id(context, group_id)
-            # FIXME(wanghao): group_backend got added before request_spec was
-            # converted to versioned objects. We should make sure that this
-            # will be handled by object version translations once we add
-            # RequestSpec object.
-            request_spec['group_backend'] = vol_utils.extract_host(group.host)
+            request_spec['resource_backend'] = vol_utils.extract_host(
+                group.host)
         elif snapshot_id and CONF.snapshot_same_host:
             # NOTE(Rongze Zhu): A simple solution for bug 1008866.
             #
@@ -757,35 +752,24 @@ class VolumeCastTask(flow_utils.CinderTask):
             # the call create volume directly to the volume host where the
             # snapshot resides instead of passing it through the scheduler, so
             # snapshot can be copied to the new volume.
+            # NOTE(tommylikehu): In order to check the backend's capacity
+            # before creating volume, we schedule this request to scheduler
+            # service with the desired backend information.
             snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
-            source_volume_ref = snapshot.volume
+            request_spec['resource_backend'] = vol_utils.extract_host(
+                snapshot.volume.host)
         elif source_volid:
             source_volume_ref = objects.Volume.get_by_id(context, source_volid)
+            request_spec['resource_backend'] = vol_utils.extract_host(
+                source_volume_ref.host)
 
-        if not source_volume_ref:
-            # Cast to the scheduler and let it handle whatever is needed
-            # to select the target host for this volume.
-            self.scheduler_rpcapi.create_volume(
-                context,
-                volume,
-                snapshot_id=snapshot_id,
-                image_id=image_id,
-                request_spec=request_spec,
-                filter_properties=filter_properties)
-        else:
-            # Bypass the scheduler and send the request directly to the volume
-            # manager.
-            volume.host = source_volume_ref.host
-            volume.cluster_name = source_volume_ref.cluster_name
-            volume.scheduled_at = timeutils.utcnow()
-            volume.save()
-            if not cgsnapshot_id:
-                self.volume_rpcapi.create_volume(
-                    context,
-                    volume,
-                    request_spec,
-                    filter_properties,
-                    allow_reschedule=False)
+        self.scheduler_rpcapi.create_volume(
+            context,
+            volume,
+            snapshot_id=snapshot_id,
+            image_id=image_id,
+            request_spec=request_spec,
+            filter_properties=filter_properties)
 
     def execute(self, context, **kwargs):
         scheduler_hints = kwargs.pop('scheduler_hints', None)
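
Taken together, the API-flow change means that creating a volume from a snapshot or from another volume no longer bypasses the scheduler: the desired backend is recorded as resource_backend in the request spec, and the scheduler both checks capacity on that backend and enforces the placement. A rough standalone illustration of that decision flow, using hypothetical helper and class names rather than Cinder's real RPC API:

# Hypothetical, simplified flow: derive resource_backend from the source
# resource, then always cast the create request to the scheduler.
def extract_host(host):
    return host.partition('#')[0] if host else None


def build_request_spec(volume, snapshot=None, source_volume=None, group=None):
    spec = {'volume_id': volume['id'], 'volume': volume}
    if group is not None:
        spec['resource_backend'] = extract_host(group['host'])
    elif snapshot is not None:
        # The snapshot lives on its parent volume's backend.
        spec['resource_backend'] = extract_host(snapshot['volume_host'])
    elif source_volume is not None:
        spec['resource_backend'] = extract_host(source_volume['host'])
    return spec


class FakeSchedulerRPC(object):
    def create_volume(self, volume, request_spec=None):
        print('schedule %s, backend hint: %s'
              % (volume['id'], request_spec.get('resource_backend')))


new_volume = {'id': 'vol-new'}
source = {'id': 'vol-src', 'host': 'host1@lvm#pool1'}
spec = build_request_spec(new_volume, source_volume=source)
# Previously this path went straight to the volume manager on source's host.
FakeSchedulerRPC().create_volume(new_volume, request_spec=spec)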