Merge "Add per-backend availability zones"
commit e8bbccd42b
@@ -117,7 +117,9 @@ global_opts = [
     # NOTE(vish): default to nova for compatibility with nova installs
     cfg.StrOpt('storage_availability_zone',
                default='nova',
-               help='Availability zone of this node'),
+               help='Availability zone of this node. Can be overridden per '
+                    'volume backend with the option '
+                    '"backend_availability_zone".'),
     cfg.StrOpt('default_availability_zone',
                help='Default availability zone for new volumes. If not set, '
                     'the storage_availability_zone option value is used as '
@@ -91,6 +91,7 @@ class Manager(base.Base, PeriodicTasks):
         self.host = host
         self.cluster = cluster
         self.additional_endpoints = []
+        self.availability_zone = CONF.storage_availability_zone
         super(Manager, self).__init__(db_driver)
 
     @property
@@ -145,6 +145,11 @@ class Service(service.Service):
             manager_class = profiler.trace_cls("rpc")(manager_class)
 
         self.service = None
+        self.manager = manager_class(host=self.host,
+                                     cluster=self.cluster,
+                                     service_name=service_name,
+                                     *args, **kwargs)
+        self.availability_zone = self.manager.availability_zone
 
         # NOTE(geguileo): We need to create the Service DB entry before we
         # create the manager, otherwise capped versions for serializer and rpc
@@ -201,10 +206,6 @@ class Service(service.Service):
         # start while we are still doing the rolling upgrade.
         self.added_to_cluster = not self.is_upgrading_to_n
 
-        self.manager = manager_class(host=self.host,
-                                     cluster=self.cluster,
-                                     service_name=service_name,
-                                     *args, **kwargs)
         self.report_interval = report_interval
         self.periodic_interval = periodic_interval
         self.periodic_fuzzy_delay = periodic_fuzzy_delay
@@ -356,13 +357,12 @@ class Service(service.Service):
             pass
 
     def _create_service_ref(self, context, rpc_version=None):
-        zone = CONF.storage_availability_zone
         kwargs = {
             'host': self.host,
             'binary': self.binary,
             'topic': self.topic,
             'report_count': 0,
-            'availability_zone': zone,
+            'availability_zone': self.availability_zone,
             'rpc_current_version': rpc_version or self.manager.RPC_API_VERSION,
             'object_current_version': objects_base.OBJ_VERSIONS.get_current(),
         }
@@ -486,7 +486,6 @@ class Service(service.Service):
             return
 
         ctxt = context.get_admin_context()
-        zone = CONF.storage_availability_zone
         try:
             try:
                 service_ref = objects.Service.get_by_id(ctxt,
@@ -499,8 +498,8 @@ class Service(service.Service):
                                                         Service.service_id)
 
             service_ref.report_count += 1
-            if zone != service_ref.availability_zone:
-                service_ref.availability_zone = zone
+            if self.availability_zone != service_ref.availability_zone:
+                service_ref.availability_zone = self.availability_zone
 
             service_ref.save()
 
@@ -116,6 +116,36 @@ class VolumeTestCase(base.BaseVolumeTestCase):
                          scheduler_rpcapi.client.serializer._base.version_cap)
         self.assertIsNone(scheduler_rpcapi.client.serializer._base.manifest)
 
+    @mock.patch('oslo_utils.importutils.import_object')
+    def test_backend_availability_zone(self, mock_import_object):
+        # NOTE(smcginnis): This isn't really the best place for this test,
+        # but we don't currently have a pure VolumeManager test class. So
+        # until we create a good suite for that class, putting here with
+        # other tests that use VolumeManager.
+
+        opts = {
+            'backend_availability_zone': 'caerbannog'
+        }
+
+        def conf_get(option):
+            if option in opts:
+                return opts[option]
+            return None
+
+        mock_driver = mock.Mock()
+        mock_driver.configuration.safe_get.side_effect = conf_get
+        mock_driver.configuration.extra_capabilities = 'null'
+
+        def import_obj(*args, **kwargs):
+            return mock_driver
+
+        mock_import_object.side_effect = import_obj
+
+        manager = vol_manager.VolumeManager(volume_driver=mock_driver)
+        self.assertIsNotNone(manager)
+        self.assertEqual(opts['backend_availability_zone'],
+                         manager.availability_zone)
+
     @mock.patch.object(vol_manager.VolumeManager,
                        'update_service_capabilities')
     def test_report_filter_goodness_function(self, mock_update):
@@ -266,6 +266,11 @@ volume_opts = [
                      "working CI system and testing are marked as unsupported "
                      "until CI is working again. This also marks a driver as "
                      "deprecated and may be removed in the next release."),
+    cfg.StrOpt('backend_availability_zone',
+               default=None,
+               help='Availability zone for this volume backend. If not set, '
+                    'the storage_availability_zone option value is used as '
+                    'the default for all backends.'),
 ]
 
 # for backward compatibility
@@ -268,6 +268,12 @@ class VolumeManager(manager.CleanableManager,
                 LOG.error(_LE("Invalid JSON: %s"),
                           self.driver.configuration.extra_capabilities)
 
+        # Check if a per-backend AZ has been specified
+        backend_zone = self.driver.configuration.safe_get(
+            'backend_availability_zone')
+        if backend_zone:
+            self.availability_zone = backend_zone
+
         if self.driver.configuration.safe_get(
                 'image_volume_cache_enabled'):
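Taken together, the changes above define a simple precedence: when the volume manager starts, a backend section's backend_availability_zone (if set) overrides the storage_availability_zone inherited from [DEFAULT], which itself defaults to 'nova'. A minimal sketch of that resolution order, using hypothetical names and a plain dict in place of the oslo.config machinery in the diff, not the actual Cinder code:

    # Illustrative sketch only (not Cinder code): the helper name and the
    # dict-based "configuration" stand in for the oslo.config objects above.
    def resolve_availability_zone(backend_conf, storage_availability_zone='nova'):
        # The per-backend option wins when set; otherwise fall back to the
        # [DEFAULT] storage_availability_zone value (which defaults to 'nova').
        backend_zone = backend_conf.get('backend_availability_zone')
        return backend_zone or storage_availability_zone

    # A backend section that sets its own zone:
    assert resolve_availability_zone({'backend_availability_zone': 'az-east'}) == 'az-east'
    # No per-backend zone configured, so the global value applies:
    assert resolve_availability_zone({}, storage_availability_zone='az-default') == 'az-default'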
releasenotes/notes/per-backend-az-28727aca360a1cc8.yaml (new file)
@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    Availability zones may now be configured per backend in a multi-backend
+    configuration. Individual backend sections can now set the configuration
+    option ``backend_availability_zone``. If set, this value will override
+    the [DEFAULT] ``storage_availability_zone`` setting.
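For operators, the release note maps to a cinder.conf layout along the following lines. This is an illustrative sketch only: the backend section names (lvm-east, lvm-west) and zone values are hypothetical, while backend_availability_zone and storage_availability_zone are the options touched by this change:

    [DEFAULT]
    enabled_backends = lvm-east,lvm-west
    # Used by any backend that does not set its own zone
    storage_availability_zone = az-default

    [lvm-east]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_backend_name = lvm-east
    # Overrides storage_availability_zone for this backend only
    backend_availability_zone = az-east

    [lvm-west]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_backend_name = lvm-west
    backend_availability_zone = az-west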