Use constants for cinder-volume

Since we have defined constants.VOLUME_BINARY, this patch changes the
remaining cinder-volume binary strings to use constants.VOLUME_BINARY.

Change-Id: I91b1ed2331a3b197a2ba39fa5cfb02e9d161d709
This commit is contained in:
parent 82ef221114
commit 91953041c0
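The constants referenced throughout this change live in
cinder/common/constants.py. As a minimal sketch, reconstructed from the
substitutions in the diff below (the real module defines more than shown;
only values implied by the replaced literals are listed):

    # cinder/common/constants.py (relevant excerpt, reconstructed from
    # this diff; treat it as an assumption, not the full file)
    VOLUME_BINARY = 'cinder-volume'        # service binary names as they
    SCHEDULER_BINARY = 'cinder-scheduler'  # appear in service listings
    BACKUP_BINARY = 'cinder-backup'
    API_BINARY = 'cinder-api'

    # The RPC topic mirrors the binary name, which is why literals used
    # as 'topic' below map to VOLUME_TOPIC rather than VOLUME_BINARY.
    VOLUME_TOPIC = VOLUME_BINARY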
@@ -18,6 +18,7 @@ import oslo_messaging
 from cinder.api import extensions
 from cinder.api.openstack import wsgi
 from cinder.api.views import capabilities as capabilities_view
+from cinder.common import constants
 from cinder import exception
 from cinder.i18n import _
 from cinder import objects

@@ -41,7 +42,7 @@ class CapabilitiesController(wsgi.Controller):
         """Return capabilities list of given backend."""
         context = req.environ['cinder.context']
         context.authorize(policy.CAPABILITIES_POLICY)
-        filters = {'host_or_cluster': id, 'binary': 'cinder-volume'}
+        filters = {'host_or_cluster': id, 'binary': constants.VOLUME_BINARY}
         services = objects.ServiceList.get_all(context, filters)
         if not services:
             msg = (_("Can't find service: %s") % id)
@@ -101,7 +101,7 @@ class ServiceController(wsgi.Controller):

             if detailed:
                 ret_fields['disabled_reason'] = svc.disabled_reason
-                if svc.binary == "cinder-volume":
+                if svc.binary == constants.VOLUME_BINARY:
                     ret_fields['replication_status'] = svc.replication_status
                     ret_fields['active_backend_id'] = svc.active_backend_id
                     ret_fields['frozen'] = svc.frozen
@@ -16,6 +16,7 @@

 from cinder.api import microversions as mv
 from cinder.api.openstack import wsgi
 from cinder.api.v3.views import clusters as clusters_view
+from cinder.common import constants
 from cinder import exception
 from cinder.i18n import _
 from cinder import objects

@@ -30,7 +31,7 @@ class ClusterController(wsgi.Controller):
     replication_fields = {'replication_status', 'frozen', 'active_backend_id'}

     @wsgi.Controller.api_version(mv.CLUSTER_SUPPORT)
-    def show(self, req, id, binary='cinder-volume'):
+    def show(self, req, id, binary=constants.VOLUME_BINARY):
         """Return data for a given cluster name with optional binary."""
         # Let the wsgi middleware convert NotAuthorized exceptions
         context = req.environ['cinder.context']

@@ -111,7 +112,7 @@ class ClusterController(wsgi.Controller):
         if not name:
             raise exception.MissingRequired(element='name')

-        binary = body.get('binary', 'cinder-volume')
+        binary = body.get('binary', constants.VOLUME_BINARY)

         # Let wsgi handle NotFound exception
         cluster = objects.Cluster.get_by_id(context, None, binary=binary,
@@ -19,6 +19,7 @@ from oslo_utils import uuidutils

 from cinder.api import microversions as mv
 from cinder.api.openstack import wsgi
 from cinder.api.v3.views import workers as workers_view
+from cinder.common import constants
 from cinder import db
 from cinder import exception
 from cinder.i18n import _

@@ -43,7 +44,7 @@ class WorkerController(wsgi.Controller):
             msg = _('Invalid filter keys: %s') % ', '.join(invalid_keys)
             raise exception.InvalidInput(reason=msg)

-        if params.get('binary') not in (None, 'cinder-volume',
+        if params.get('binary') not in (None, constants.VOLUME_BINARY,
                                         'cinder-scheduler'):
             msg = _('binary must be empty or set to cinder-volume or '
                     'cinder-scheduler')
@@ -69,6 +69,7 @@ from oslo_utils import timeutils

 # Need to register global_opts
 from cinder.common import config  # noqa
+from cinder.common import constants
 from cinder import context
 from cinder import db
 from cinder.db import migration as db_migration

@@ -92,7 +93,8 @@ def _get_non_shared_target_hosts(ctxt):
     rpc.init(CONF)
     rpcapi = volume_rpcapi.VolumeAPI()

-    services = objects.ServiceList.get_all_by_topic(ctxt, 'cinder-volume')
+    services = objects.ServiceList.get_all_by_topic(ctxt,
+                                                    constants.VOLUME_TOPIC)
     for service in services:
         capabilities = rpcapi.get_capabilities(ctxt, service.host, True)
         if not capabilities.get('shared_targets', True):
@@ -45,6 +45,7 @@ i18n.enable_lazy()

 # Need to register global_opts
 from cinder.common import config  # noqa
+from cinder.common import constants
 from cinder.db import api as session
 from cinder.i18n import _
 from cinder import service

@@ -93,7 +94,7 @@ def main():
         try:
             server = service.Service.create(host=host,
                                             service_name=backend,
-                                            binary='cinder-volume',
+                                            binary=constants.VOLUME_BINARY,
                                             coordination=True,
                                             cluster=cluster)
         except Exception:
@@ -52,6 +52,7 @@ from sqlalchemy.sql import func
 from sqlalchemy.sql import sqltypes

 from cinder.api import common
+from cinder.common import constants
 from cinder.common import sqlalchemyutils
 from cinder import db
 from cinder.db.sqlalchemy import models

@@ -626,7 +627,7 @@ def volume_service_uuids_online_data_migration(context, max_count):
     vol_refs = query.limit(max_count).all()

     service_refs = model_query(context, models.Service).filter_by(
-        topic="cinder-volume").limit(max_count).all()
+        topic=constants.VOLUME_TOPIC).limit(max_count).all()

     # build a map to access the service uuid by host
     svc_map = {}
@@ -63,7 +63,7 @@ class BaseAdminTest(test.TestCase):
     def _create_volume(self, context, updates=None):
        db_volume = {'status': 'available',
                     'host': 'test',
-                    'binary': 'cinder-volume',
+                    'binary': constants.VOLUME_BINARY,
                     'availability_zone': 'fake_zone',
                     'attach_status': fields.VolumeAttachStatus.DETACHED}
        if updates:

@@ -98,9 +98,9 @@ class AdminActionsTest(BaseAdminTest):

     def _get_minimum_rpc_version_mock(ctxt, binary):
         binary_map = {
-            'cinder-volume': rpcapi.VolumeAPI,
-            'cinder-backup': backup_rpcapi.BackupAPI,
-            'cinder-scheduler': scheduler_rpcapi.SchedulerAPI,
+            constants.VOLUME_BINARY: rpcapi.VolumeAPI,
+            constants.BACKUP_BINARY: backup_rpcapi.BackupAPI,
+            constants.SCHEDULER_BINARY: scheduler_rpcapi.SchedulerAPI,
         }
         return binary_map[binary].RPC_API_VERSION

@@ -501,12 +501,12 @@ class AdminActionsTest(BaseAdminTest):
         db.service_create(self.ctx,
                           {'host': 'test',
                            'topic': constants.VOLUME_TOPIC,
-                           'binary': 'cinder-volume',
+                           'binary': constants.VOLUME_BINARY,
                            'created_at': timeutils.utcnow()})
         db.service_create(self.ctx,
                           {'host': 'test2',
                            'topic': constants.VOLUME_TOPIC,
-                           'binary': 'cinder-volume',
+                           'binary': constants.VOLUME_BINARY,
                            'created_at': timeutils.utcnow()})
         db.service_create(self.ctx,
                           {'host': 'clustered_host',
@@ -21,6 +21,7 @@ from oslo_utils import timeutils
 import webob.exc

 from cinder.api.contrib import hosts as os_hosts
+from cinder.common import constants
 from cinder import context
 from cinder import exception
 from cinder.objects import service

@@ -113,9 +114,9 @@ class HostTestCase(test.TestCase):
         hosts = os_hosts._list_hosts(self.req)
         self.assertEqual(LIST_RESPONSE, hosts)

-        cinder_hosts = os_hosts._list_hosts(self.req, 'cinder-volume')
+        cinder_hosts = os_hosts._list_hosts(self.req, constants.VOLUME_BINARY)
         expected = [host for host in LIST_RESPONSE
-                    if host['service'] == 'cinder-volume']
+                    if host['service'] == constants.VOLUME_BINARY]
         self.assertEqual(expected, cinder_hosts)

     def test_list_hosts_with_zone(self):

@@ -158,8 +159,8 @@ class HostTestCase(test.TestCase):
     def test_show_host(self, mock_get_host):
         host = 'test_host'
         test_service = service.Service(id=1, host=host,
-                                       binary='cinder-volume',
-                                       topic='cinder-volume')
+                                       binary=constants.VOLUME_BINARY,
+                                       topic=constants.VOLUME_TOPIC)
         mock_get_host.return_value = test_service

         ctxt1 = context.RequestContext(project_id=fake_constants.PROJECT_ID,
@@ -26,6 +26,7 @@ import webob.exc
 from cinder.api.contrib import services
 from cinder.api import extensions
 from cinder.api import microversions as mv
+from cinder.common import constants
 from cinder import context
 from cinder import exception
 from cinder import objects

@@ -129,7 +130,7 @@ class FakeRequest(object):

 class FakeRequestWithBinary(FakeRequest):
     def __init__(self, **kwargs):
-        kwargs.setdefault('binary', 'cinder-volume')
+        kwargs.setdefault('binary', constants.VOLUME_BINARY)
         super(FakeRequestWithBinary, self).__init__(**kwargs)


@@ -177,7 +178,7 @@ def fake_service_update(context, service_id, values):
     if service is None:
         raise exception.ServiceNotFound(service_id=service_id)
     else:
-        {'host': 'host1', 'service': 'cinder-volume',
+        {'host': 'host1', 'service': constants.VOLUME_BINARY,
          'disabled': values['disabled']}


@@ -571,7 +572,7 @@ class ServicesTest(test.TestCase):
         self.assertEqual(response, res_dict)

     def test_services_enable_with_service_key(self):
-        body = {'host': 'host1', 'service': 'cinder-volume'}
+        body = {'host': 'host1', 'service': constants.VOLUME_BINARY}
         req = fakes.HTTPRequest.blank(
             '/v2/%s/os-services/enable' % fake.PROJECT_ID)
         res_dict = self.controller.update(req, "enable", body)

@@ -579,7 +580,7 @@ class ServicesTest(test.TestCase):
         self.assertEqual('enabled', res_dict['status'])

     def test_services_enable_with_binary_key(self):
-        body = {'host': 'host1', 'binary': 'cinder-volume'}
+        body = {'host': 'host1', 'binary': constants.VOLUME_BINARY}
         req = fakes.HTTPRequest.blank(
             '/v2/%s/os-services/enable' % fake.PROJECT_ID)
         res_dict = self.controller.update(req, "enable", body)

@@ -589,7 +590,7 @@ class ServicesTest(test.TestCase):
     def test_services_disable_with_service_key(self):
         req = fakes.HTTPRequest.blank(
             '/v2/%s/os-services/disable' % fake.PROJECT_ID)
-        body = {'host': 'host1', 'service': 'cinder-volume'}
+        body = {'host': 'host1', 'service': constants.VOLUME_BINARY}
         res_dict = self.controller.update(req, "disable", body)

         self.assertEqual('disabled', res_dict['status'])

@@ -597,7 +598,7 @@ class ServicesTest(test.TestCase):
     def test_services_disable_with_binary_key(self):
         req = fakes.HTTPRequest.blank(
             '/v2/%s/os-services/disable' % fake.PROJECT_ID)
-        body = {'host': 'host1', 'binary': 'cinder-volume'}
+        body = {'host': 'host1', 'binary': constants.VOLUME_BINARY}
         res_dict = self.controller.update(req, "disable", body)

         self.assertEqual('disabled', res_dict['status'])

@@ -779,7 +780,7 @@ class ServicesTest(test.TestCase):
             mock.sentinel.context, body)
         self.assertEqual([binary], binaries)

-        if binary == 'cinder-api':
+        if binary == constants.API_BINARY:
             self.assertEqual([], services)
             service_list_mock.assert_not_called()
         else:

@@ -804,9 +805,9 @@ class ServicesTest(test.TestCase):
     def test__set_log(self, backup_rpc_mock, vol_rpc_mock, sch_rpc_mock,
                       set_log_mock, get_all_mock, get_log_mock):
         services = [
-            objects.Service(self.context, binary='cinder-scheduler'),
-            objects.Service(self.context, binary='cinder-volume'),
-            objects.Service(self.context, binary='cinder-backup'),
+            objects.Service(self.context, binary=constants.SCHEDULER_BINARY),
+            objects.Service(self.context, binary=constants.VOLUME_BINARY),
+            objects.Service(self.context, binary=constants.BACKUP_BINARY),
         ]
         get_all_mock.return_value = services
         body = {'binary': '*', 'prefix': 'eventlet.', 'level': 'debug'}

@@ -852,11 +853,12 @@ class ServicesTest(test.TestCase):
         ]

         services = [
-            objects.Service(self.context, binary='cinder-scheduler',
+            objects.Service(self.context, binary=constants.SCHEDULER_BINARY,
                             host='host'),
-            objects.Service(self.context, binary='cinder-volume',
+            objects.Service(self.context, binary=constants.VOLUME_BINARY,
                             host='host@backend#pool'),
-            objects.Service(self.context, binary='cinder-backup', host='host'),
+            objects.Service(self.context, binary=constants.BACKUP_BINARY,
+                            host='host'),
         ]
         get_all_mock.return_value = services
         body = {'binary': '*', 'prefix': 'eventlet.'}

@@ -874,7 +876,7 @@ class ServicesTest(test.TestCase):
              'levels': mock.sentinel.api_levels},
             {'binary': 'cinder-scheduler', 'host': 'host',
              'levels': {'p5': 'l5', 'p6': 'l6'}},
-            {'binary': 'cinder-volume',
+            {'binary': constants.VOLUME_BINARY,
              'host': 'host@backend#pool',
              'levels': {'p3': 'l3', 'p4': 'l4'}},
             {'binary': 'cinder-backup', 'host': 'host',
@@ -21,6 +21,7 @@ from six.moves import http_client
 from six.moves.urllib.parse import urlencode
 import webob

+from cinder.common import constants
 from cinder import context
 from cinder import exception
 from cinder import objects

@@ -120,14 +121,14 @@ class SnapshotManageTest(test.TestCase):
         """
         mock_db.return_value = fake_service.fake_service_obj(
             self._admin_ctxt,
-            binary='cinder-volume')
+            binary=constants.VOLUME_BINARY)
         body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}}
         res = self._get_resp_post(body)
         self.assertEqual(http_client.ACCEPTED, res.status_int, res)

         # Check the db.service_get was called with correct arguments.
         mock_db.assert_called_once_with(
-            mock.ANY, None, host='fake_host', binary='cinder-volume',
+            mock.ANY, None, host='fake_host', binary=constants.VOLUME_BINARY,
             cluster_name=None)

         # Check the create_snapshot_in_db was called with correct arguments.
@@ -22,6 +22,7 @@ import webob

 from cinder.api import microversions as mv
 from cinder.api.v3 import router as router_v3
+from cinder.common import constants
 from cinder import context
 from cinder import objects
 from cinder import test

@@ -77,7 +78,7 @@ class SnapshotManageTest(test.TestCase):
         """
         mock_service_get.return_value = fake_service.fake_service_obj(
             self._admin_ctxt,
-            binary='cinder-volume')
+            binary=constants.VOLUME_BINARY)

         body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}}
         res = self._get_resp_post(body)

@@ -187,7 +188,7 @@ class SnapshotManageTest(test.TestCase):
             sort_dirs=['desc'], want_objects=True)
         detail_view_mock.assert_called_once_with(mock.ANY, snaps, len(snaps))
         get_service_mock.assert_called_once_with(
-            mock.ANY, None, host=host, binary='cinder-volume',
+            mock.ANY, None, host=host, binary=constants.VOLUME_BINARY,
             cluster_name=cluster_name)

     @ddt.data(mv.MANAGE_EXISTING_LIST, mv.MANAGE_EXISTING_CLUSTER)
@@ -22,6 +22,7 @@ import webob

 from cinder.api import microversions as mv
 from cinder.api.v3 import router as router_v3
+from cinder.common import constants
 from cinder import context
 from cinder import objects
 from cinder import test

@@ -188,7 +189,7 @@ class VolumeManageTest(test.TestCase):
         detail_view_mock.assert_called_once_with(mock.ANY, volumes,
                                                  len(volumes))
         get_service_mock.assert_called_once_with(
-            mock.ANY, None, host=host, binary='cinder-volume',
+            mock.ANY, None, host=host, binary=constants.VOLUME_BINARY,
             cluster_name=cluster_name)

     @ddt.data(mv.MANAGE_EXISTING_LIST, mv.MANAGE_EXISTING_CLUSTER)
@@ -22,6 +22,7 @@ import webob
 from cinder.api import microversions as mv
 from cinder.api.v3 import router as router_v3
 from cinder.api.v3 import workers
+from cinder.common import constants
 from cinder import context
 from cinder import objects
 from cinder import test

@@ -30,13 +31,13 @@ from cinder.tests.unit import fake_constants as fake


 SERVICES = (
-    [objects.Service(id=1, host='host1', binary='cinder-volume',
+    [objects.Service(id=1, host='host1', binary=constants.VOLUME_BINARY,
                      cluster_name='mycluster'),
-     objects.Service(id=2, host='host2', binary='cinder-volume',
+     objects.Service(id=2, host='host2', binary=constants.VOLUME_BINARY,
                      cluster_name='mycluster')],
-    [objects.Service(id=3, host='host3', binary='cinder-volume',
+    [objects.Service(id=3, host='host3', binary=constants.VOLUME_BINARY,
                      cluster_name='mycluster'),
-     objects.Service(id=4, host='host4', binary='cinder-volume',
+     objects.Service(id=4, host='host4', binary=constants.VOLUME_BINARY,
                      cluster_name='mycluster')],
 )
@@ -23,6 +23,7 @@ import ddt
 import mock
 from oslo_config import cfg

+from cinder.common import constants
 from cinder import context
 from cinder import exception
 from cinder.message import message_field

@@ -424,17 +425,17 @@ class SchedulerManagerTestCase(test.TestCase):

     def test_cleanup_destination_volume(self):
         service = objects.Service(id=1, host='hostname', cluster_name=None,
-                                  binary='cinder-volume')
+                                  binary=constants.VOLUME_BINARY)
         result = self.manager._cleanup_destination(None, service)
         expected = self.manager.volume_api.do_cleanup, service, service.host
         self.assertEqual(expected, result)

     def test_cleanup_destination_volume_cluster_cache_hit(self):
         cluster = objects.Cluster(id=1, name='mycluster',
-                                  binary='cinder-volume')
+                                  binary=constants.VOLUME_BINARY)
         service = objects.Service(id=2, host='hostname',
                                   cluster_name=cluster.name,
-                                  binary='cinder-volume')
+                                  binary=constants.VOLUME_BINARY)
         cluster_cache = {'cinder-volume': {'mycluster': cluster}}
         result = self.manager._cleanup_destination(cluster_cache, service)
         expected = self.manager.volume_api.do_cleanup, cluster, cluster.name

@@ -443,11 +444,11 @@ class SchedulerManagerTestCase(test.TestCase):
     @mock.patch('cinder.objects.Cluster.get_by_id')
     def test_cleanup_destination_volume_cluster_cache_miss(self, get_mock):
         cluster = objects.Cluster(id=1, name='mycluster',
-                                  binary='cinder-volume')
+                                  binary=constants.VOLUME_BINARY)
         service = objects.Service(self.context,
                                   id=2, host='hostname',
                                   cluster_name=cluster.name,
-                                  binary='cinder-volume')
+                                  binary=constants.VOLUME_BINARY)
         get_mock.return_value = cluster
         cluster_cache = collections.defaultdict(dict)
         result = self.manager._cleanup_destination(cluster_cache, service)

@@ -470,24 +471,24 @@ class SchedulerManagerTestCase(test.TestCase):
     @mock.patch('cinder.objects.ServiceList.get_all')
     def test_work_cleanup(self, get_mock, vol_clean_mock, sch_clean_mock):
         args = dict(service_id=1, cluster_name='cluster_name', host='host',
-                    binary='cinder-volume', is_up=False, disabled=True,
+                    binary=constants.VOLUME_BINARY, is_up=False, disabled=True,
                     resource_id=fake.VOLUME_ID, resource_type='Volume')

         cluster = objects.Cluster(id=1, name=args['cluster_name'],
-                                  binary='cinder-volume')
+                                  binary=constants.VOLUME_BINARY)
         services = [objects.Service(self.context,
                                     id=2, host='hostname',
                                     cluster_name=cluster.name,
-                                    binary='cinder-volume',
+                                    binary=constants.VOLUME_BINARY,
                                     cluster=cluster),
                     objects.Service(self.context,
                                     id=3, host='hostname',
                                     cluster_name=None,
-                                    binary='cinder-scheduler'),
+                                    binary=constants.SCHEDULER_BINARY),
                     objects.Service(self.context,
                                     id=4, host='hostname',
                                     cluster_name=None,
-                                    binary='cinder-volume')]
+                                    binary=constants.VOLUME_BINARY)]
         get_mock.return_value = services

         cleanup_request = objects.CleanupRequest(self.context, **args)
@@ -37,6 +37,7 @@ from cinder.cmd import rtstool as cinder_rtstool
 from cinder.cmd import scheduler as cinder_scheduler
 from cinder.cmd import volume as cinder_volume
 from cinder.cmd import volume_usage_audit
+from cinder.common import constants
 from cinder import context
 from cinder.db.sqlalchemy import api as sqlalchemy_api
 from cinder import exception

@@ -173,10 +174,10 @@ class TestCinderVolumeCmd(test.TestCase):
         log_setup.assert_called_once_with(CONF, "cinder")
         monkey_patch.assert_called_once_with()
         get_launcher.assert_called_once_with()
-        c1 = mock.call(binary='cinder-volume', host='host@backend1',
+        c1 = mock.call(binary=constants.VOLUME_BINARY, host='host@backend1',
                        service_name='backend1', coordination=True,
                        cluster=None)
-        c2 = mock.call(binary='cinder-volume', host='host@backend2',
+        c2 = mock.call(binary=constants.VOLUME_BINARY, host='host@backend2',
                        service_name='backend2', coordination=True,
                        cluster=None)
         service_create.assert_has_calls([c1, c2])

@@ -2052,8 +2053,8 @@ class TestVolumeSharedTargetsOnlineMigration(test.TestCase):
         # Need a service to query
         values = {
             'host': 'host1@lvm-driver1',
-            'binary': 'cinder-volume',
-            'topic': 'cinder-volume',
+            'binary': constants.VOLUME_BINARY,
+            'topic': constants.VOLUME_TOPIC,
             'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}
         utils.create_service(ctxt, values)

@@ -2090,8 +2091,8 @@ class TestVolumeSharedTargetsOnlineMigration(test.TestCase):

         values = {
             'host': 'host1@lvm-driver1',
-            'binary': 'cinder-volume',
-            'topic': 'cinder-volume',
+            'binary': constants.VOLUME_BINARY,
+            'topic': constants.VOLUME_TOPIC,
             'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}
         utils.create_service(ctxt, values)
@@ -27,6 +27,7 @@ import six
 from sqlalchemy.sql import operators

 from cinder.api import common
+from cinder.common import constants
 from cinder import context
 from cinder import db
 from cinder.db.sqlalchemy import api as sqlalchemy_api

@@ -166,13 +167,13 @@ class DBAPIServiceTestCase(BaseTest):
         # Force create one entry with no UUID
         sqlalchemy_api.service_create(self.ctxt, {
             'host': 'host1',
-            'binary': 'cinder-volume',
+            'binary': constants.VOLUME_BINARY,
             'topic': 'volume', })

         # Create another one with a valid UUID
         sqlalchemy_api.service_create(self.ctxt, {
             'host': 'host2',
-            'binary': 'cinder-volume',
+            'binary': constants.VOLUME_BINARY,
             'topic': 'volume',
             'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'})

@@ -186,15 +187,15 @@ class DBAPIServiceTestCase(BaseTest):
     def test_service_uuid_migrations_with_limit(self):
         sqlalchemy_api.service_create(self.ctxt, {
             'host': 'host1',
-            'binary': 'cinder-volume',
+            'binary': constants.VOLUME_BINARY,
             'topic': 'volume', })
         sqlalchemy_api.service_create(self.ctxt, {
             'host': 'host2',
-            'binary': 'cinder-volume',
+            'binary': constants.VOLUME_BINARY,
             'topic': 'volume', })
         sqlalchemy_api.service_create(self.ctxt, {
             'host': 'host3',
-            'binary': 'cinder-volume',
+            'binary': constants.VOLUME_BINARY,
             'topic': 'volume', })
         # Run the migration and verify that we updated 1 entry
         total, updated = db.service_uuids_online_data_migration(

@@ -474,8 +475,8 @@ class DBAPIServiceTestCase(BaseTest):
         # Need a service to query
         values = {
             'host': 'host1@lvm-driver1',
-            'binary': 'cinder-volume',
-            'topic': 'cinder-volume'}
+            'binary': constants.VOLUME_BINARY,
+            'topic': constants.VOLUME_TOPIC}
         utils.create_service(self.ctxt, values)

         # Run the migration and verify that we updated 1 entry

@@ -496,8 +497,8 @@ class DBAPIServiceTestCase(BaseTest):

         values = {
             'host': 'host1@lvm-driver1',
-            'binary': 'cinder-volume',
-            'topic': 'cinder-volume',
+            'binary': constants.VOLUME_BINARY,
+            'topic': constants.VOLUME_TOPIC,
             'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}
         utils.create_service(self.ctxt, values)
@@ -25,6 +25,7 @@ from oslo_concurrency import processutils
 from oslo_config import cfg
 from oslo_db import exception as db_exc

+from cinder.common import constants
 from cinder import context
 from cinder import db
 from cinder import exception

@@ -385,7 +386,8 @@ class ServiceTestCase(test.TestCase):
                 return_value='1.6')
     def test_start_rpc_and_init_host_no_cluster(self, is_upgrading_mock):
         """Test that without cluster we don't create rpc service."""
-        app = service.Service.create(host=self.host, binary='cinder-volume',
+        app = service.Service.create(host=self.host,
+                                     binary=constants.VOLUME_BINARY,
                                      cluster=None, topic=self.topic)
         self._check_rpc_servers_and_init_host(app, True, None)

@@ -395,7 +397,8 @@ class ServiceTestCase(test.TestCase):
         get_min_obj_mock.return_value = '1.7'
         cluster = 'cluster@backend#pool'
         self.host = 'host@backend#pool'
-        app = service.Service.create(host=self.host, binary='cinder-volume',
+        app = service.Service.create(host=self.host,
+                                     binary=constants.VOLUME_BINARY,
                                      cluster=cluster, topic=self.topic)
         self._check_rpc_servers_and_init_host(app, True, cluster)
@@ -15,6 +15,7 @@

 import mock

+from cinder.common import constants
 from cinder import service
 from cinder import test

@@ -30,7 +31,7 @@ class SetupProfilerTestCase(test.TestCase):
     def test_profiler_not_present(self):
         service.profiler = None
         service.LOG.debug = mock.MagicMock()
-        service.setup_profiler("cinder-volume", "localhost")
+        service.setup_profiler(constants.VOLUME_BINARY, "localhost")
         service.LOG.debug.assert_called_once_with("osprofiler is not present")

     @mock.patch("cinder.service.context")

@@ -38,15 +39,15 @@ class SetupProfilerTestCase(test.TestCase):
         service.CONF.profiler.enabled = True
         return_value = {"Meaning Of Life": 42}
         context.get_admin_context().to_dict.return_value = return_value
-        service.setup_profiler("cinder-volume", "localhost")
+        service.setup_profiler(constants.VOLUME_BINARY, "localhost")
         service.osprofiler_initializer.init_from_conf.assert_called_once_with(
             conf=service.CONF,
             context=return_value,
             project="cinder",
-            service="cinder-volume",
+            service=constants.VOLUME_BINARY,
             host="localhost")

     def test_profiler_disabled(self):
         service.CONF.profiler.enabled = False
-        service.setup_profiler("cinder-volume", "localhost")
+        service.setup_profiler(constants.VOLUME_BINARY, "localhost")
         service.osprofiler_initializer.init_from_conf.assert_not_called()
@@ -24,6 +24,7 @@ from oslo_service import loopingcall
 from oslo_utils import timeutils
 import oslo_versionedobjects

+from cinder.common import constants
 from cinder import context
 from cinder import db
 from cinder import exception

@@ -496,7 +497,7 @@ def create_service(ctxt, values=None):
 def default_cluster_values():
     return {
         'name': 'cluster_name',
-        'binary': 'cinder-volume',
+        'binary': constants.VOLUME_BINARY,
         'disabled': False,
         'disabled_reason': None,
         'deleted': False,
@@ -121,7 +121,7 @@ class ReplicationTestCase(base.BaseVolumeTestCase):

         mock_db_get_all.return_value = [fake_service.fake_service_obj(
             self.context,
-            binary='cinder-volume')]
+            binary=constants.VOLUME_BINARY)]
         mock_db_update.return_value = None
         volume_api = cinder.volume.api.API()
         self.assertRaises(exception.InvalidInput,

@@ -138,7 +138,7 @@ class ReplicationTestCase(base.BaseVolumeTestCase):
         """Test replication freeze_host."""

         service = fake_service.fake_service_obj(self.context,
-                                                binary='cinder-volume')
+                                                binary=constants.VOLUME_BINARY)
         mock_get_all.return_value = [service]
         mock_freeze.return_value = True
         volume_api = cinder.volume.api.API()

@@ -155,7 +155,7 @@ class ReplicationTestCase(base.BaseVolumeTestCase):

         mock_get_all.return_value = [fake_service.fake_service_obj(
             self.context,
-            binary='cinder-volume')]
+            binary=constants.VOLUME_BINARY)]
         mock_db_update.return_value = None
         volume_api = cinder.volume.api.API()
         self.assertRaises(exception.InvalidInput,

@@ -172,7 +172,7 @@ class ReplicationTestCase(base.BaseVolumeTestCase):
         """Test replication thaw_host."""

         service = fake_service.fake_service_obj(self.context,
-                                                binary='cinder-volume')
+                                                binary=constants.VOLUME_BINARY)
         mock_get_all.return_value = [service]
         mock_thaw.return_value = True
         volume_api = cinder.volume.api.API()

@@ -189,7 +189,7 @@ class ReplicationTestCase(base.BaseVolumeTestCase):

         mock_get_all.return_value = [fake_service.fake_service_obj(
             self.context,
-            binary='cinder-volume')]
+            binary=constants.VOLUME_BINARY)]
         mock_db_update.return_value = None
         volume_api = cinder.volume.api.API()
         self.assertRaises(exception.InvalidInput,

@@ -463,7 +463,7 @@ class ReplicationTestCase(base.BaseVolumeTestCase):
                           secondary_id=None):
         host = vol_utils.extract_host(self.manager.host)
         utils.create_service(self.context, {'host': host,
-                                            'binary': 'cinder-volume'})
+                                            'binary': constants.VOLUME_BINARY})
         for volume in in_volumes:
             utils.create_volume(self.context, self.manager.host, **volume)
@@ -21,6 +21,7 @@ import mock
 from oslo_config import cfg
 from oslo_serialization import jsonutils

+from cinder.common import constants
 from cinder import db
 from cinder import exception
 from cinder import objects

@@ -356,7 +357,7 @@ class VolumeRPCAPITestCase(test.RPCAPITestCase):
     def test_freeze_host(self):
         service = fake_service.fake_service_obj(self.context,
                                                 host='fake_host',
-                                                binary='cinder-volume')
+                                                binary=constants.VOLUME_BINARY)
         self._test_rpc_api('freeze_host',
                            rpc_method='call',
                            server='fake_host',

@@ -366,7 +367,7 @@ class VolumeRPCAPITestCase(test.RPCAPITestCase):
     def test_thaw_host(self):
         service = fake_service.fake_service_obj(self.context,
                                                 host='fake_host',
-                                                binary='cinder-volume')
+                                                binary=constants.VOLUME_BINARY)
         self._test_rpc_api('thaw_host',
                            rpc_method='call',
                            server='fake_host',
@@ -1686,7 +1686,7 @@ class API(base.Base):
         # cluster itself is also up.
         try:
             service = objects.Service.get_by_id(elevated, None, host=svc_host,
-                                                binary='cinder-volume',
+                                                binary=constants.VOLUME_BINARY,
                                                 cluster_name=svc_cluster)
         except exception.ServiceNotFound:
             with excutils.save_and_reraise_exception():
@@ -138,7 +138,7 @@ class VolumeAPI(rpc.RPCAPI):
     RPC_API_VERSION = '3.15'
     RPC_DEFAULT_VERSION = '3.0'
     TOPIC = constants.VOLUME_TOPIC
-    BINARY = 'cinder-volume'
+    BINARY = constants.VOLUME_BINARY

     def _get_cctxt(self, host=None, version=None, **kwargs):
         if host:
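Because these strings are user-visible (service listings report, for
example, "binary": "cinder-volume"), a guard test can pin the constants'
values so that centralizing the literals never changes the wire format.
A hypothetical sketch, with the test name and placement assumed rather
than taken from this commit:

    # Hypothetical guard (not part of this commit): the constants'
    # values are exposed in API responses, so they must stay stable.
    from cinder.common import constants
    from cinder import test


    class VolumeConstantsTestCase(test.TestCase):
        def test_constants_keep_legacy_values(self):
            self.assertEqual('cinder-volume', constants.VOLUME_BINARY)
            self.assertEqual('cinder-volume', constants.VOLUME_TOPIC)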