Set the default availability zone back to nova
Currently nova and cinder have to expose the same set of availability
zones for the ec2 api to work properly, so set the default availability
zone for cinder to 'nova' so that the two match.

Includes a hack in the volume create API that maps an availability zone
of 'cinder' to 'nova' to work around gate issues. The hack can be
removed once the gate sets the zone to 'nova' properly.

Fixes bug 1053508

Change-Id: I9c30ccc5c79b9c4e6c1ebeb3e80d0098e1ffbb11
parent 32721a67ae
commit d051ca9cdf
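The workaround described in the message can be illustrated in isolation. The sketch below is not Cinder code; normalize_availability_zone() and the sample request dict are hypothetical names used only to show the intended behaviour of the hack: a requested zone of 'cinder' is silently mapped to 'nova', while any other zone passes through unchanged.

# Minimal standalone sketch of the workaround described above (assumed
# helper name, not part of Cinder).
def normalize_availability_zone(requested_zone):
    """Map the legacy 'cinder' zone to 'nova' so Nova and Cinder agree."""
    if requested_zone == 'cinder':
        return 'nova'
    return requested_zone


if __name__ == '__main__':
    # A create request that still names the old default zone...
    volume = {'size': 1, 'availability_zone': 'cinder'}
    # ...ends up in the zone that Nova also advertises.
    assert normalize_availability_zone(volume['availability_zone']) == 'nova'
    # Explicitly requested zones other than 'cinder' are left untouched.
    assert normalize_availability_zone('us-east-1a') == 'us-east-1a'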
@@ -319,6 +319,9 @@ class VolumeController(wsgi.Controller):
                 kwargs['image_id'] = image_uuid
 
         kwargs['availability_zone'] = volume.get('availability_zone', None)
+        # NOTE(vish): Temporary hack to work around gating issues
+        if kwargs['availability_zone'] == 'cinder':
+            kwargs['availability_zone'] = 'nova'
 
         new_volume = self.volume_api.create(context,
                                             size,
@@ -177,8 +177,9 @@ global_opts = [
                default=socket.gethostname(),
                help='Name of this node. This can be an opaque identifier. '
                     'It is not necessarily a hostname, FQDN, or IP address.'),
+    # NOTE(vish): default to nova for compatibility with nova installs
     cfg.StrOpt('storage_availability_zone',
-               default='cinder',
+               default='nova',
                help='availability zone of this node'),
     cfg.ListOpt('memcached_servers',
                 default=None,
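The hunk above only changes the option's default; operators can still override it. A minimal sketch of that behaviour is below, written against the standalone oslo.config package as an assumption for illustration (the commit itself uses the cfg module bundled inside Cinder at the time); the option name, default, and help text mirror the diff above.

# Sketch: the new default applies when nothing is configured, but the
# option remains a normal StrOpt that cinder.conf can override.
from oslo_config import cfg

opts = [
    cfg.StrOpt('storage_availability_zone',
               default='nova',
               help='availability zone of this node'),
]

conf = cfg.ConfigOpts()
conf.register_opts(opts)
conf([])  # no CLI args or config files parsed, so the default is used
print(conf.storage_availability_zone)  # -> 'nova'

# An operator can still pin a different zone, e.g. in cinder.conf:
#   [DEFAULT]
#   storage_availability_zone = az1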
@@ -109,11 +109,11 @@ class VolumeApiTest(test.TestCase):
         vol = {"size": '1',
                "display_name": "Volume Test Name",
                "display_description": "Volume Test Desc",
-               "availability_zone": "cinder",
+               "availability_zone": "nova",
                "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'}
         expected = {'volume': {'status': 'fakestatus',
                                'display_description': 'Volume Test Desc',
-                               'availability_zone': 'cinder',
+                               'availability_zone': 'nova',
                                'display_name': 'Volume Test Name',
                                'attachments': [{'device': '/',
                                                 'server_id': 'fakeuuid',
@@ -124,7 +124,7 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
         self.assertEquals(1, len(create_actions))
         create_action = create_actions[0]
         self.assertEquals(create_action['id'], created_volume_id)
-        self.assertEquals(create_action['availability_zone'], 'cinder')
+        self.assertEquals(create_action['availability_zone'], 'nova')
         self.assertEquals(create_action['size'], 1)
 
         export_actions = driver.LoggingVolumeDriver.logs_like(
@@ -133,7 +133,7 @@ class VolumesTest(integrated_helpers._IntegratedTestBase):
         self.assertEquals(1, len(export_actions))
         export_action = export_actions[0]
         self.assertEquals(export_action['id'], created_volume_id)
-        self.assertEquals(export_action['availability_zone'], 'cinder')
+        self.assertEquals(export_action['availability_zone'], 'nova')
 
         delete_actions = driver.LoggingVolumeDriver.logs_like(
             'delete_volume',
@@ -129,12 +129,12 @@ class ServiceTestCase(test.TestCase):
                           'binary': binary,
                           'topic': topic,
                           'report_count': 0,
-                          'availability_zone': 'cinder'}
+                          'availability_zone': 'nova'}
         service_ref = {'host': host,
                        'binary': binary,
                        'topic': topic,
                        'report_count': 0,
-                       'availability_zone': 'cinder',
+                       'availability_zone': 'nova',
                        'id': 1}
 
         service.db.service_get_by_args(mox.IgnoreArg(),
@@ -162,12 +162,12 @@ class ServiceTestCase(test.TestCase):
                           'binary': binary,
                           'topic': topic,
                           'report_count': 0,
-                          'availability_zone': 'cinder'}
+                          'availability_zone': 'nova'}
         service_ref = {'host': host,
                        'binary': binary,
                        'topic': topic,
                        'report_count': 0,
-                       'availability_zone': 'cinder',
+                       'availability_zone': 'nova',
                        'id': 1}
 
         service.db.service_get_by_args(mox.IgnoreArg(),