Integrate OSprofiler with Cinder

*) Add osprofiler wsgi middleware

This middleware is used for two things:

1) It checks that the person who wants to trace is trusted and knows
   the secret HMAC key.
2) It starts tracing when the proper trace headers are present and
   adds the first wsgi trace point, with info about the HTTP request.
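
For illustration, a rough sketch of the check the middleware performs
(a simplification, not the actual code; the real logic lives in
osprofiler.web.WsgiMiddleware, which reads the X-Trace-Info and
X-Trace-HMAC headers):

  # Hypothetical sketch of the HMAC check, NOT osprofiler's implementation.
  import hashlib
  import hmac

  def should_trace(environ, hmac_keys):
      trace_info = environ.get("HTTP_X_TRACE_INFO")  # base_id/parent_id blob
      trace_hmac = environ.get("HTTP_X_TRACE_HMAC")  # signature of that blob
      if not (trace_info and trace_hmac):
          return None  # no tracing requested
      for key in hmac_keys.split(","):
          expected = hmac.new(key.encode(), trace_info.encode(),
                              hashlib.md5).hexdigest()
          if hmac.compare_digest(expected, trace_hmac):
              return trace_info  # caller knows a trusted HMAC key
      return None  # signature check failed: serve the request untraced
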
*) Add initialization of osprofiler at service start

Sets the oslo.messaging notifier instance (used to send notifications
to Ceilometer).
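
Once the Messaging notifier is registered, every trace point becomes an
oslo.messaging notification. A minimal sketch of that flow (mirroring
the setup_profiler() helper added to cinder/service.py below; the
"profiler" topic is an assumption based on the localrc settings in the
test notes):

  import osprofiler.notifier
  from osprofiler import profiler

  def init_tracing(messaging, context_dict, transport, binary, host):
      _notifier = osprofiler.notifier.create(
          "Messaging", messaging, context_dict, transport,
          "cinder", binary, host)
      osprofiler.notifier.set(_notifier)

  profiler.init(hmac_key="SECRET_KEY")
  profiler.start("example-point")  # emitted as a notification
  profiler.stop()                  # that Ceilometer can collect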

*) Fix fake notifier so its signature matches oslo.messaging's Notifier
(accept the driver, topic and retry arguments)

NOTE: to test this, put the following in your localrc:

  RECLONE=no

  CEILOMETER_NOTIFICATION_TOPICS=notifications,profiler

  ENABLED_SERVICES+=,ceilometer-acompute,ceilometer-acentral,ceilometer-anotification,ceilometer-collector
  ENABLED_SERVICES+=,ceilometer-alarm-evaluator,ceilometer-alarm-notifier
  ENABLED_SERVICES+=,ceilometer-api

Use python-cinderclient with this patch applied:

  https://review.openstack.org/#/c/103359/

Run any command with --profile SECRET_KEY:

  $ cinder --profile SECRET_KEY create 1
  # it will print <Trace ID>
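
Under the hood, --profile initializes the profiler with your key and
attaches signed trace headers to every HTTP request the client sends;
roughly (an assumed sketch of the client-side flow in the patch above):

  from osprofiler import profiler
  import osprofiler.web

  profiler.init("SECRET_KEY")
  headers = osprofiler.web.get_trace_id_headers()
  # headers now carries X-Trace-Info (the trace ids) and X-Trace-HMAC
  # (their signature); the server-side middleware verifies the signature
  # against its own hmac_keys before it starts tracing.
  print(profiler.get().get_base_id())  # the <Trace ID> printed above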

Get a pretty HTML report of the trace:

  $ osprofiler trace show --html <Trace ID>

Note that osprofiler should be run with the admin user name & tenant.
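
Beyond the rpc/driver/db trace points this change wires in via
profiler.trace_cls(), individual methods can also be traced with
osprofiler's decorator, e.g. (illustrative only, not part of this
change):

  from osprofiler import profiler

  @profiler.trace("create-volume-step", hide_args=True)
  def _do_create_volume(context, volume):
      # each call shows up as a "create-volume-step" span in the
      # HTML report produced by `osprofiler trace show --html`
      ...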

Change-Id: Ic3ce587946eaa7e6b19cf35a2cc905ae5de29dd3
Author: Boris Pavlovic
Date:   2014-06-29 20:03:28 +04:00
Commit: a8fa3ceb1e (parent c8941ececc)

10 changed files with 103 additions and 11 deletions


@@ -30,6 +30,8 @@ from oslo.config import cfg
 from oslo.db import exception as db_exc
 from oslo.db import options
 from oslo.db.sqlalchemy import session as db_session
+import osprofiler.sqlalchemy
+import sqlalchemy
 from sqlalchemy import or_
 from sqlalchemy.orm import joinedload, joinedload_all
 from sqlalchemy.orm import RelationshipProperty
@@ -46,6 +48,7 @@ from cinder.openstack.common import uuidutils
 CONF = cfg.CONF
+CONF.import_group("profiler", "cinder.service")
 LOG = logging.getLogger(__name__)
 options.set_defaults(CONF, connection='sqlite:///$state_path/cinder.sqlite')
@@ -63,6 +66,13 @@ def _create_facade_lazily():
                 CONF.database.connection,
                 **dict(CONF.database.iteritems())
             )
+
+            if CONF.profiler.profiler_enabled:
+                if CONF.profiler.trace_sqlalchemy:
+                    osprofiler.sqlalchemy.add_tracing(sqlalchemy,
+                                                      _FACADE.get_engine(),
+                                                      "db")
+
     return _FACADE


@@ -28,6 +28,7 @@ __all__ = [
 from oslo.config import cfg
 from oslo import messaging
+from osprofiler import profiler

 import cinder.context
 import cinder.exception
@@ -116,9 +117,22 @@ class RequestContextSerializer(messaging.Serializer):
         return self._base.deserialize_entity(context, entity)

     def serialize_context(self, context):
-        return context.to_dict()
+        _context = context.to_dict()
+
+        prof = profiler.get()
+        if prof:
+            trace_info = {
+                "hmac_key": prof.hmac_key,
+                "base_id": prof.get_base_id(),
+                "parent_id": prof.get_id()
+            }
+            _context.update({"trace_info": trace_info})
+
+        return _context

     def deserialize_context(self, context):
+        trace_info = context.pop("trace_info", None)
+        if trace_info:
+            profiler.init(**trace_info)
+
         return cinder.context.RequestContext.from_dict(context)


@@ -24,6 +24,9 @@ import random
 from oslo.config import cfg
 from oslo import messaging
+import osprofiler.notifier
+from osprofiler import profiler
+import osprofiler.web

 from cinder import context
 from cinder import db
@@ -64,8 +67,35 @@
                help='Number of workers for OpenStack Volume API service. '
                     'The default is equal to the number of CPUs available.'), ]

+profiler_opts = [
+    cfg.BoolOpt("profiler_enabled", default=False,
+                help=_('If False fully disable profiling feature.')),
+    cfg.BoolOpt("trace_sqlalchemy", default=False,
+                help=_("If False doesn't trace SQL requests."))
+]
+
 CONF = cfg.CONF
 CONF.register_opts(service_opts)
+CONF.register_opts(profiler_opts, group="profiler")
+
+
+def setup_profiler(binary, host):
+    if CONF.profiler.profiler_enabled:
+        _notifier = osprofiler.notifier.create(
+            "Messaging", messaging, context.get_admin_context().to_dict(),
+            rpc.TRANSPORT, "cinder", binary, host)
+        osprofiler.notifier.set(_notifier)
+        LOG.warning("OSProfiler is enabled.\nIt means that person who knows "
+                    "any of hmac_keys that are specified in "
+                    "/etc/cinder/api-paste.ini can trace his requests. \n"
+                    "In real life only operator can read this file so there "
+                    "is no security issue. Note that even if person can "
+                    "trigger profiler, only admin user can retrieve trace "
+                    "information.\n"
+                    "To disable OSprofiler set in cinder.conf:\n"
+                    "[profiler]\nenabled=false")
+    else:
+        osprofiler.web.disable()
+
+
 class Service(service.Service):
@@ -89,6 +119,8 @@ class Service(service.Service):
         self.topic = topic
         self.manager_class_name = manager
         manager_class = importutils.import_class(self.manager_class_name)
+        manager_class = profiler.trace_cls("rpc")(manager_class)
+
         self.manager = manager_class(host=self.host,
                                      service_name=service_name,
                                      *args, **kwargs)
@@ -99,6 +131,8 @@
         self.saved_args, self.saved_kwargs = args, kwargs
         self.timers = []

+        setup_profiler(binary, host)
+
     def start(self):
         version_string = version.version_string()
         LOG.info(_('Starting %(topic)s node (version %(version_string)s)'),
@@ -296,6 +330,8 @@ class WSGIService(object):
         self.port = getattr(CONF, '%s_listen_port' % name, 0)
         self.workers = getattr(CONF, '%s_workers' % name,
                                processutils.get_worker_count())
+        setup_profiler(name, self.host)
+
         if self.workers < 1:
             LOG.warn(_("Value of config option %(name)s_workers must be "
                        "integer greater than 1. Input value ignored.") %


@@ -34,13 +34,16 @@ FakeMessage = collections.namedtuple('Message',
 class FakeNotifier(object):

-    def __init__(self, transport, publisher_id, serializer=None):
+    def __init__(self, transport, publisher_id, serializer=None, driver=None,
+                 topic=None, retry=None):
         self.transport = transport
         self.publisher_id = publisher_id
         for priority in ['debug', 'info', 'warn', 'error', 'critical']:
             setattr(self, priority,
                     functools.partial(self._notify, priority.upper()))
         self._serializer = serializer or messaging.serializer.NoOpSerializer()
+        self._topic = topic
+        self.retry = retry

     def prepare(self, publisher_id=None):
         if publisher_id is None:


@@ -45,8 +45,12 @@ class BaseBackupTest(test.TestCase):
         super(BaseBackupTest, self).setUp()
         vol_tmpdir = tempfile.mkdtemp()
         self.flags(volumes_dir=vol_tmpdir)
-        self.backup_mgr = \
-            importutils.import_object(CONF.backup_manager)
+        with mock.patch("osprofiler.profiler.trace_cls") as mock_trace_cls:
+            side_effect = lambda value: value
+            mock_decorator = mock.MagicMock(side_effect=side_effect)
+            mock_trace_cls.return_value = mock_decorator
+            self.backup_mgr = \
+                importutils.import_object(CONF.backup_manager)
         self.backup_mgr.host = 'testhost'
         self.ctxt = context.get_admin_context()
         self.backup_mgr.driver.set_initialized()


@@ -102,7 +102,11 @@ class BaseVolumeTestCase(test.TestCase):
         self.flags(volumes_dir=vol_tmpdir,
                    notification_driver=["test"])
         self.addCleanup(self._cleanup)
-        self.volume = importutils.import_object(CONF.volume_manager)
+        with mock.patch("osprofiler.profiler.trace_cls") as mock_trace_cls:
+            side_effect = lambda value: value
+            mock_decorator = mock.MagicMock(side_effect=side_effect)
+            mock_trace_cls.return_value = mock_decorator
+            self.volume = importutils.import_object(CONF.volume_manager)
         self.context = context.get_admin_context()
         self.context.user_id = 'fake'
         self.context.project_id = 'fake'


@@ -40,6 +40,7 @@ import time
 from oslo.config import cfg
 from oslo import messaging
+from osprofiler import profiler

 from cinder import compute
 from cinder import context
@@ -180,6 +181,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             db=self.db,
             host=self.host)
+        self.driver = profiler.trace_cls("driver")(self.driver)

         try:
             self.extra_capabilities = jsonutils.loads(
                 self.driver.configuration.extra_capabilities)


@@ -10,15 +10,15 @@ use = call:cinder.api:root_app_factory
 [composite:openstack_volume_api_v1]
 use = call:cinder.api.middleware.auth:pipeline_factory
-noauth = request_id faultwrap sizelimit noauth apiv1
-keystone = request_id faultwrap sizelimit authtoken keystonecontext apiv1
-keystone_nolimit = request_id faultwrap sizelimit authtoken keystonecontext apiv1
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1

 [composite:openstack_volume_api_v2]
 use = call:cinder.api.middleware.auth:pipeline_factory
-noauth = request_id faultwrap sizelimit noauth apiv2
-keystone = request_id faultwrap sizelimit authtoken keystonecontext apiv2
-keystone_nolimit = request_id faultwrap sizelimit authtoken keystonecontext apiv2
+noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
+keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
+keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2

 [filter:request_id]
 paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
@@ -26,6 +26,11 @@ paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
 [filter:faultwrap]
 paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory

+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
+hmac_keys = SECRET_KEY
+enabled = yes
+
 [filter:noauth]
 paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory


@@ -2359,6 +2359,19 @@
 #ringfile=/etc/oslo/matchmaker_ring.json


+[profiler]
+
+#
+# Options defined in cinder.service
+#
+
+# If False fully disable profiling feature. (boolean value)
+#profiler_enabled=false
+
+# If False doesn't trace SQL requests. (boolean value)
+#trace_sqlalchemy=false
+
+
 [ssl]

 #


@@ -13,6 +13,7 @@ oslo.config>=1.2.1
 oslo.db>=0.2.0
 oslo.messaging>=1.3.0
 oslo.rootwrap>=1.3.0.0a1
+osprofiler>=0.3.0
 paramiko>=1.13.0
 Paste
 PasteDeploy>=1.5.0