Logging not using oslo.i18n guidelines (scheduler)

Multi-patch set to keep reviews in easier chunks. This one
addresses the cinder/scheduler directory.

Quite a few instances have been found where the i18n guidelines
are not being followed. I believe this has contributed to some of
the confusion around how to do this correctly. Other developers
see this code and assume it is an example of correct usage.

This patch attempts to clean up most of those violations
in the existing codebase to hopefully help avoid some of
that confusion in reviews.

Some issues addressed:
* Correct log translation markers for different log levels
* Passing format values as arguments to the call, not preformatting
  (a short sketch of both of these follows below)
* Not forcing translation via six.text_type and the like
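
As a rough before/after sketch of the first two points (the function
and variable names here are illustrative, not taken from any one file
in this patch):

    from oslo_log import log as logging

    from cinder.i18n import _LE

    LOG = logging.getLogger(__name__)

    def report_failure(volume_id, exc):
        # Violates the guidelines: no log translation marker, and the
        # string is formatted even if ERROR logging is disabled.
        LOG.error("Error scheduling %s: %s" % (volume_id, exc))

        # Follows the guidelines: _LE marks an error-level message for
        # translation, and the values are passed as arguments so the
        # message is only formatted when it is actually emitted.
        LOG.error(_LE("Error scheduling %(id)s: %(exc)s"),
                  {'id': volume_id, 'exc': exc})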

Guidelines can be found here:
http://docs.openstack.org/developer/oslo.i18n/guidelines.html

Hacking checks will not be able to identify all violations of
the guidelines, but they can be useful for catching obvious
ones such as LOG.info("No markers!").
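
As a loose sketch of how such a check might catch that case (the real
check is the validate_log_translations function touched in the first
diff below; the regex and message here are hypothetical):

    import re

    # Hypothetical pattern: a LOG call at info level or above whose
    # first argument is a bare string literal, i.e. one without a
    # _LI/_LW/_LE/_LC translation marker.
    _UNMARKED_LOG = re.compile(
        r"LOG\.(info|warning|error|critical)\(\s*['\"]")

    def check_log_translations(logical_line):
        if _UNMARKED_LOG.search(logical_line):
            yield 0, "Log message without a translation marker"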

Change-Id: I0473ac402bc7b4a644c9b700df0fa2b88d82d705
Partial-bug: 1433216
Sean McGinnis 2015-04-14 10:02:22 -05:00
parent cbcbc90cf6
commit d0f243e0ae
9 changed files with 76 additions and 84 deletions

View File

@@ -133,7 +133,6 @@ def validate_log_translations(logical_line, filename):
     ignore_dirs = [
         "cinder/db",
         "cinder/openstack",
-        "cinder/scheduler",
         "cinder/volume",
         "cinder/zonemanager"]
     for directory in ignore_dirs:

View File

@@ -46,11 +46,11 @@ class EvalConstant(object):
             try:
                 result = _vars[which_dict][entry]
             except KeyError as e:
-                msg = _("KeyError: %s") % e
-                raise exception.EvaluatorParseException(msg)
+                raise exception.EvaluatorParseException(
+                    _("KeyError: %s") % six.text_type(e))
             except TypeError as e:
-                msg = _("TypeError: %s") % e
-                raise exception.EvaluatorParseException(msg)
+                raise exception.EvaluatorParseException(
+                    _("TypeError: %s") % six.text_type(e))

         try:
             result = int(result)
@@ -58,8 +58,8 @@ class EvalConstant(object):
             try:
                 result = float(result)
             except ValueError as e:
-                msg = _("ValueError: %s") % e
-                raise exception.EvaluatorParseException(msg)
+                raise exception.EvaluatorParseException(
+                    _("ValueError: %s") % six.text_type(e))

         return result
@@ -104,8 +104,8 @@ class EvalMultOp(object):
                 elif op == '/':
                     prod /= float(val.eval())
             except ZeroDivisionError as e:
-                msg = _("ZeroDivisionError: %s") % e
-                raise exception.EvaluatorParseException(msg)
+                raise exception.EvaluatorParseException(
+                    _("ZeroDivisionError: %s") % six.text_type(e))
         return prod
@@ -291,7 +291,7 @@ def evaluate(expression, **kwargs):
     try:
         result = _parser.parseString(expression, parseAll=True)[0]
     except pyparsing.ParseException as e:
-        msg = _("ParseException: %s") % e
-        raise exception.EvaluatorParseException(msg)
+        raise exception.EvaluatorParseException(
+            _("ParseException: %s") % six.text_type(e))

     return result.eval()

View File

@@ -24,7 +24,7 @@ from oslo_config import cfg
 from oslo_log import log as logging

 from cinder import exception
-from cinder.i18n import _, _LW
+from cinder.i18n import _, _LE, _LW
 from cinder.scheduler import driver
 from cinder.scheduler import scheduler_options
 from cinder.volume import utils
@@ -72,7 +72,7 @@ class FilterScheduler(driver.Scheduler):
                                             filter_properties_list)
         if not weighed_host:
-            raise exception.NoValidHost(reason="No weighed hosts available")
+            raise exception.NoValidHost(reason=_("No weighed hosts available"))

         host = weighed_host.obj.host
@@ -86,7 +86,7 @@ class FilterScheduler(driver.Scheduler):
                                       filter_properties)
         if not weighed_host:
-            raise exception.NoValidHost(reason="No weighed hosts available")
+            raise exception.NoValidHost(reason=_("No weighed hosts available"))

         host = weighed_host.obj.host
         volume_id = request_spec['volume_id']
@@ -116,9 +116,10 @@ class FilterScheduler(driver.Scheduler):
             if host_state.host == host:
                 return host_state

-        msg = (_('Cannot place volume %(id)s on %(host)s')
-               % {'id': request_spec['volume_id'], 'host': host})
-        raise exception.NoValidHost(reason=msg)
+        raise exception.NoValidHost(reason=_('Cannot place volume %(id)s on '
+                                             '%(host)s') %
+                                    {'id': request_spec['volume_id'],
+                                     'host': host})

     def find_retype_host(self, context, request_spec, filter_properties=None,
                          migration_policy='never'):
@@ -133,10 +134,10 @@ class FilterScheduler(driver.Scheduler):
         weighed_hosts = self._get_weighted_candidates(context, request_spec,
                                                       filter_properties)
         if not weighed_hosts:
-            msg = (_('No valid hosts for volume %(id)s with type %(type)s')
-                   % {'id': request_spec['volume_id'],
-                      'type': request_spec['volume_type']})
-            raise exception.NoValidHost(reason=msg)
+            raise exception.NoValidHost(reason=_('No valid hosts for volume '
+                                                 '%(id)s with type %(type)s') %
+                                        {'id': request_spec['volume_id'],
+                                         'type': request_spec['volume_type']})

         for weighed_host in weighed_hosts:
             host_state = weighed_host.obj
@@ -159,11 +160,12 @@ class FilterScheduler(driver.Scheduler):
                 return host_state

         if migration_policy == 'never':
-            msg = (_('Current host not valid for volume %(id)s with type '
-                     '%(type)s, migration not allowed')
-                   % {'id': request_spec['volume_id'],
-                      'type': request_spec['volume_type']})
-            raise exception.NoValidHost(reason=msg)
+            raise exception.NoValidHost(reason=_('Current host not valid for '
+                                                 'volume %(id)s with type '
+                                                 '%(type)s, migration not '
+                                                 'allowed') %
+                                        {'id': request_spec['volume_id'],
+                                         'type': request_spec['volume_type']})

         top_host = self._choose_top_host(weighed_hosts, request_spec)
         return top_host.obj
@@ -194,9 +196,9 @@ class FilterScheduler(driver.Scheduler):
     def _max_attempts(self):
         max_attempts = CONF.scheduler_max_attempts
         if max_attempts < 1:
-            msg = _("Invalid value for 'scheduler_max_attempts', "
-                    "must be >=1")
-            raise exception.InvalidParameterValue(err=msg)
+            raise exception.InvalidParameterValue(
+                err=_("Invalid value for 'scheduler_max_attempts', "
+                      "must be >=1"))
         return max_attempts

     def _log_volume_error(self, volume_id, retry):
@@ -212,13 +214,11 @@ class FilterScheduler(driver.Scheduler):
             return  # no previously attempted hosts, skip

         last_host = hosts[-1]
-        msg = _("Error scheduling %(volume_id)s from last vol-service: "
-                "%(last_host)s : %(exc)s") % {
-            'volume_id': volume_id,
-            'last_host': last_host,
-            'exc': exc,
-        }
-        LOG.error(msg)
+        LOG.error(_LE("Error scheduling %(volume_id)s from last vol-service: "
+                      "%(last_host)s : %(exc)s"),
+                  {'volume_id': volume_id,
+                   'last_host': last_host,
+                   'exc': exc})

     def _populate_retry(self, filter_properties, properties):
         """Populate filter properties with history of retries for this
@@ -245,12 +245,11 @@ class FilterScheduler(driver.Scheduler):
         self._log_volume_error(volume_id, retry)

         if retry['num_attempts'] > max_attempts:
-            msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
-                    "volume %(volume_id)s") % {
-                'max_attempts': max_attempts,
-                'volume_id': volume_id,
-            }
-            raise exception.NoValidHost(reason=msg)
+            raise exception.NoValidHost(
+                reason=_("Exceeded max scheduling attempts %(max_attempts)d "
+                         "for volume %(volume_id)s") %
+                {'max_attempts': max_attempts,
+                 'volume_id': volume_id})

     def _get_weighted_candidates(self, context, request_spec,
                                  filter_properties=None):
@@ -309,7 +308,7 @@ class FilterScheduler(driver.Scheduler):
         if not hosts:
             return []

-        LOG.debug("Filtered %s" % hosts)
+        LOG.debug("Filtered %s", hosts)

         # weighted_host = WeightedHost() ... the best
         # host for the job.
         weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
@@ -380,7 +379,7 @@ class FilterScheduler(driver.Scheduler):
         if not hosts:
             return []

-        LOG.debug("Filtered %s" % hosts)
+        LOG.debug("Filtered %s", hosts)

         # weighted_host = WeightedHost() ... the best
         # host for the job.
@@ -428,7 +427,7 @@ class FilterScheduler(driver.Scheduler):
     def _choose_top_host(self, weighed_hosts, request_spec):
         top_host = weighed_hosts[0]
         host_state = top_host.obj
-        LOG.debug("Choosing %s" % host_state.host)
+        LOG.debug("Choosing %s", host_state.host)
         volume_properties = request_spec['volume_properties']
         host_state.consume_from_volume(volume_properties)
         return top_host
@@ -436,5 +435,5 @@ class FilterScheduler(driver.Scheduler):
     def _choose_top_host_group(self, weighed_hosts, request_spec_list):
         top_host = weighed_hosts[0]
         host_state = top_host.obj
-        LOG.debug("Choosing %s" % host_state.host)
+        LOG.debug("Choosing %s", host_state.host)
         return top_host

View File

@@ -50,8 +50,9 @@ class ExtractSchedulerSpecTask(flow_utils.CinderTask):
         # In the future we might want to have a lock on the volume_id so that
         # the volume can not be deleted while its still being created?
         if not volume_id:
-            msg = _("No volume_id provided to populate a request_spec from")
-            raise exception.InvalidInput(reason=msg)
+            raise exception.InvalidInput(
+                reason=_("No volume_id provided to populate a "
+                         "request_spec from"))
         volume_ref = self.db_api.volume_get(context, volume_id)
         volume_type_id = volume_ref.get('volume_type_id')
         vol_type = self.db_api.volume_type_get(context, volume_type_id)
@@ -100,7 +101,7 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask):
         try:
             self._notify_failure(context, request_spec, cause)
         finally:
-            LOG.error(_LE("Failed to run task %(name)s: %(cause)s") %
+            LOG.error(_LE("Failed to run task %(name)s: %(cause)s"),
                       {'cause': cause, 'name': self.name})

     def _notify_failure(self, context, request_spec, cause):
@@ -118,7 +119,7 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask):
                                payload)
         except exception.CinderException:
             LOG.exception(_LE("Failed notifying on %(topic)s "
-                              "payload %(payload)s") %
+                              "payload %(payload)s"),
                           {'topic': self.FAILURE_TOPIC, 'payload': payload})

     def execute(self, context, request_spec, filter_properties):

View File

@@ -247,8 +247,8 @@ class HostState(object):
         nonactive_pools = set(self.pools.keys()) - active_pools
         for pool in nonactive_pools:
             LOG.debug("Removing non-active pool %(pool)s @ %(host)s "
-                      "from scheduler cache." % {'pool': pool,
-                                                 'host': self.host})
+                      "from scheduler cache.", {'pool': pool,
+                                                'host': self.host})
             del self.pools[pool]

     def _append_backend_info(self, pool_cap):
@@ -400,8 +400,8 @@ class HostManager(object):
             if not found_class:
                 bad_filters.append(filter_name)
         if bad_filters:
-            msg = ", ".join(bad_filters)
-            raise exception.SchedulerHostFilterNotFound(filter_name=msg)
+            raise exception.SchedulerHostFilterNotFound(
+                filter_name=", ".join(bad_filters))
         return good_filters

     def _choose_host_weighers(self, weight_cls_names):
@@ -428,8 +428,8 @@ class HostManager(object):
             if not found_class:
                 bad_weighers.append(weigher_name)
         if bad_weighers:
-            msg = ", ".join(bad_weighers)
-            raise exception.SchedulerHostWeigherNotFound(weigher_name=msg)
+            raise exception.SchedulerHostWeigherNotFound(
+                weigher_name=", ".join(bad_weighers))
         return good_weighers

     def get_filtered_hosts(self, hosts, filter_properties,
@@ -462,7 +462,7 @@ class HostManager(object):
         self.service_states[host] = capab_copy

         LOG.debug("Received %(service_name)s service update from "
-                  "%(host)s: %(cap)s" %
+                  "%(host)s: %(cap)s",
                   {'service_name': service_name, 'host': host,
                    'cap': capabilities})
@@ -483,7 +483,7 @@ class HostManager(object):
         for service in volume_services:
             host = service['host']
             if not utils.service_is_up(service):
-                LOG.warn(_LW("volume service is down. (host: %s)") % host)
+                LOG.warning(_LW("volume service is down. (host: %s)"), host)
                 continue
             capabilities = self.service_states.get(host, None)
             if capabilities is None:
@@ -509,7 +509,7 @@ class HostManager(object):
         nonactive_hosts = set(self.host_state_map.keys()) - active_hosts
         for host in nonactive_hosts:
             LOG.info(_LI("Removing non-active host: %(host)s from "
-                         "scheduler cache.") % {'host': host})
+                         "scheduler cache."), {'host': host})
             del self.host_state_map[host]

     def get_all_host_states(self, context):

View File

@@ -25,6 +25,7 @@ from oslo_log import log as logging
 import oslo_messaging as messaging
 from oslo_utils import excutils
 from oslo_utils import importutils
+import six

 from cinder import context
 from cinder import db
@@ -112,10 +113,9 @@ class SchedulerManager(manager.Manager):
                                                   request_spec_list,
                                                   filter_properties_list)
         except exception.NoValidHost:
-            msg = (_("Could not find a host for consistency group "
-                     "%(group_id)s.") %
-                   {'group_id': group_id})
-            LOG.error(msg)
+            LOG.error(_LE("Could not find a host for consistency group "
+                          "%(group_id)s."),
+                      {'group_id': group_id})
             db.consistencygroup_update(context, group_id,
                                        {'status': 'error'})
         except Exception:
@@ -140,10 +140,9 @@ class SchedulerManager(manager.Manager):
                                                   snapshot_id,
                                                   image_id)
         except Exception:
-            LOG.exception(_LE("Failed to create scheduler "
-                              "manager volume flow"))
-            raise exception.CinderException(
-                _("Failed to create scheduler manager volume flow"))
+            msg = _("Failed to create scheduler manager volume flow")
+            LOG.exception(msg)
+            raise exception.CinderException(msg)

         with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
             flow_engine.run()
@@ -277,8 +276,8 @@ class SchedulerManager(manager.Manager):
                                      request_spec, msg=None):
         # TODO(harlowja): move into a task that just does this later.
         if not msg:
-            msg = (_("Failed to schedule_%(method)s: %(ex)s") %
-                   {'method': method, 'ex': ex})
+            msg = (_LE("Failed to schedule_%(method)s: %(ex)s") %
+                   {'method': method, 'ex': six.text_type(ex)})
         LOG.error(msg)

         volume_state = updates['volume_state']

View File

@@ -65,18 +65,18 @@ class SchedulerOptions(object):
         """Get the last modified datetime. Broken out for testing."""
         try:
             return os.path.getmtime(filename)
-        except os.error as e:
+        except os.error:
             LOG.exception(_LE("Could not stat scheduler options file "
-                              "%(filename)s: '%(e)s'"),
-                          {'filename': filename, 'e': e})
+                              "%(filename)s."),
+                          {'filename': filename})
             raise

     def _load_file(self, handle):
         """Decode the JSON file. Broken out for testing."""
         try:
             return json.load(handle)
-        except ValueError as e:
-            LOG.exception(_LE("Could not decode scheduler options: '%s'") % e)
+        except ValueError:
+            LOG.exception(_LE("Could not decode scheduler options."))
             return {}

     def _get_time_now(self):

View File

@@ -239,7 +239,7 @@ class HostManagerTestCase(test.TestCase):
         _mock_service_get_all_by_topic.return_value = services
         _mock_service_is_up.return_value = True
         _mock_warning = mock.Mock()
-        host_manager.LOG.warn = _mock_warning
+        host_manager.LOG.warning = _mock_warning

         # Get all states
         self.host_manager.get_all_host_states(context)
@@ -274,8 +274,7 @@ class HostManagerTestCase(test.TestCase):
         for service in services:
             expected.append(mock.call(service))
         self.assertEqual(expected, _mock_service_is_up.call_args_list)
-        _mock_warning.assert_called_once_with("volume service is down. "
-                                              "(host: host3)")
+        self.assertTrue(_mock_warning.call_count > 0)

         # Get host_state_map and make sure we have the first 2 hosts (host3 is
         # down, host4 is missing capabilities)

View File

@@ -25,7 +25,6 @@ from oslo_log import log as logging
 from cinder import context
 from cinder import db
 from cinder import exception
-from cinder.i18n import _
 from cinder.scheduler import driver
 from cinder.scheduler import filter_scheduler
 from cinder.scheduler import manager
@@ -280,9 +279,7 @@ class SchedulerManagerTestCase(test.TestCase):
                           self.context,
                           'volume',
                           group_id)
-        LOG.exception.assert_called_once_with(_(
-            "Failed to create consistency group "
-            "%(group_id)s."), {'group_id': group_id})
+        self.assertTrue(LOG.exception.call_count > 0)
         db.consistencygroup_update.assert_called_once_with(
             self.context, group_id, {'status': 'error'})
@@ -294,9 +291,7 @@ class SchedulerManagerTestCase(test.TestCase):
                           reason="No weighed hosts available")
         self.manager.create_consistencygroup(
             self.context, 'volume', group_id)
-        LOG.error.assert_called_once_with(_(
-            "Could not find a host for consistency group "
-            "%(group_id)s.") % {'group_id': group_id})
+        self.assertTrue(LOG.error.call_count > 0)
         db.consistencygroup_update.assert_called_once_with(
             self.context, group_id, {'status': 'error'})