Add api_workers for each service to defaults

Rendering {{ openstack_service_workers }} as the number of workers for
every OpenStack service is not enough. Several services receive more
requests than the others and therefore need more workers.

This patch only adds a per-service default for the number of workers
and sets {{ openstack_service_workers }} as its value, so the value can
be overridden in host_vars per server. Nothing changes for normal
users.
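
For example (illustrative only, not part of this patch), a controller
that needs more Keystone API workers could set the new variable in its
host_vars; leaving it unset keeps the previous behaviour of using
{{ openstack_service_workers }}:

  # host_vars entry for that controller (hypothetical value)
  keystone_api_workers: 10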

Change-Id: Ifa5863f8ec865bbf8e39c9b2add42c92abe40616
Author: Michal Arbet 2021-10-08 15:43:02 +02:00
parent 3f8683cccc
commit 3e8db91a1e
70 changed files with 123 additions and 53 deletions
ansible/roles changed (defaults and templates): aodh, barbican, cinder,
cloudkitty, designate, freezer, glance, gnocchi, heat, ironic, keystone,
magnum, manila, masakari, mistral, monasca, murano, neutron, nova-cell,
nova, octavia, placement, sahara, senlin, solum, swift, tacker, trove,
vitrage, watcher, zun

@ -196,6 +196,7 @@ aodh_keystone_user: "aodh"
openstack_aodh_auth: "{{ openstack_auth }}"
aodh_api_workers: "{{ openstack_service_workers }}"
####################
# Kolla

@ -32,7 +32,7 @@ LogLevel info
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ aodh_log_dir }}/aodh_wsgi_access.log" logformat
WSGIApplicationGroup %{GLOBAL}
WSGIDaemonProcess aodh group=aodh processes={{ openstack_service_workers }} threads=1 user=aodh
WSGIDaemonProcess aodh group=aodh processes={{ aodh_api_workers }} threads=1 user=aodh
WSGIProcessGroup aodh
WSGIScriptAlias / "{{ binary_path }}/aodh-api"
</VirtualHost>

@ -147,6 +147,7 @@ barbican_audit_role: "audit"
openstack_barbican_auth: "{{ openstack_auth }}"
barbican_api_workers: "{{ openstack_service_workers }}"
####################
# Kolla

@ -4,7 +4,7 @@ https-socket = {{ api_interface_address | put_address_in_context('url') }}:{{ ba
{% else %}
http-socket = {{ api_interface_address | put_address_in_context('url') }}:{{ barbican_api_listen_port }}
{% endif %}
processes = {{ openstack_service_workers }}
processes = {{ barbican_api_workers }}
lazy = true
vacuum = true
no-default-app = true

@ -207,6 +207,7 @@ openstack_cinder_auth: "{{ openstack_auth }}"
# allow room for slowness.
cinder_rpc_version_startup_delay: 30
cinder_api_workers: "{{ openstack_service_workers }}"
####################
# Cinder

@ -18,7 +18,7 @@ LogLevel info
{% endif %}
<VirtualHost *:{{ cinder_api_listen_port }}>
WSGIDaemonProcess cinder-api processes={{ openstack_service_workers }} threads=1 user=cinder group=cinder display-name=cinder-api
WSGIDaemonProcess cinder-api processes={{ cinder_api_workers }} threads=1 user=cinder group=cinder display-name=cinder-api
WSGIProcessGroup cinder-api
WSGIScriptAlias / /var/www/cgi-bin/cinder/cinder-wsgi
WSGIApplicationGroup %{GLOBAL}

@ -108,6 +108,8 @@ cloudkitty_keystone_user: "cloudkitty"
openstack_cloudkitty_auth: "{{ openstack_auth }}"
cloudkitty_api_workers: "{{ openstack_service_workers }}"
cloudkitty_processor_workers: "{{ openstack_service_workers }}"
####################
# Cloudkitty

@ -90,7 +90,7 @@ port = {{ cloudkitty_api_port }}
[orchestrator]
coordination_url = mysql://{{ cloudkitty_database_user }}:{{ cloudkitty_database_password }}@{{ cloudkitty_database_address }}/{{ cloudkitty_database_name }}
max_workers = {{ openstack_service_workers }}
max_workers = {{ cloudkitty_processor_workers }}
[storage]
backend = {{ cloudkitty_storage_backend }}

@ -23,7 +23,7 @@ LogLevel info
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ cloudkitty_log_dir }}/cloudkitty-api-access.log" logformat
WSGIApplicationGroup %{GLOBAL}
WSGIDaemonProcess cloudkitty group=cloudkitty processes={{ openstack_service_workers }} threads=1 user=cloudkitty
WSGIDaemonProcess cloudkitty group=cloudkitty processes={{ cloudkitty_api_workers }} threads=1 user=cloudkitty
WSGIProcessGroup cloudkitty
WSGIScriptAlias / "{{ binary_path }}/cloudkitty-api"

@ -270,6 +270,12 @@ designate_logging_debug: "{{ openstack_logging_debug }}"
openstack_designate_auth: "{{ openstack_auth }}"
designate_api_workers: "{{ openstack_service_workers }}"
designate_worker_workers: "{{ openstack_service_workers }}"
designate_producer_workers: "{{ openstack_service_workers }}"
designate_central_workers: "{{ openstack_service_workers }}"
designate_sink_workers: "{{ openstack_service_workers }}"
designate_mdns_workers: "{{ openstack_service_workers }}"
####################
# Kolla

@ -8,12 +8,12 @@ transport_url = {{ rpc_transport_url }}
[service:central]
default_pool_id = {{ designate_pool_id }}
workers = {{ openstack_service_workers }}
workers = {{ designate_central_workers }}
[service:api]
listen = {{ api_interface_address | put_address_in_context('url') }}:{{ designate_api_listen_port }}
api_base_uri = {{ designate_internal_endpoint }}
workers = {{ openstack_service_workers }}
workers = {{ designate_api_workers }}
enable_api_admin = True
enable_host_header = True
enabled_extensions_admin = quotas, reports
@ -40,14 +40,14 @@ memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_addres
{% if service_name == 'designate-mdns' %}
[service:mdns]
listen = {{ 'dns' | kolla_address | put_address_in_context('url') }}:{{ designate_mdns_port }}
workers = {{ openstack_service_workers }}
workers = {{ designate_mdns_workers }}
{% endif %}
[service:worker]
workers = {{ openstack_service_workers }}
workers = {{ designate_worker_workers }}
[service:producer]
workers = {{ openstack_service_workers }}
workers = {{ designate_producer_workers }}
threads = 1000
enabled_tasks = None
@ -62,7 +62,7 @@ idle_timeout = 3600
{% if service_name == 'designate-sink' and designate_enable_notifications_sink | bool %}
[service:sink]
enabled_notification_handlers = nova_fixed, neutron_floatingip
workers = {{ openstack_service_workers }}
workers = {{ designate_sink_workers }}
[handler:nova_fixed]
#NOTE: zone_id must be manually filled an ID from openstack zone list

@ -85,6 +85,7 @@ freezer_keystone_user: "freezer"
openstack_freezer_auth: "{{ openstack_auth }}"
freezer_api_workers: "{{ openstack_service_workers }}"
####################
# Kolla

@ -18,7 +18,7 @@ LogLevel info
{% endif %}
<VirtualHost *:{{ freezer_api_port }}>
WSGIDaemonProcess freezer-api processes={{ openstack_service_workers }} threads=1 user=freezer display-name=freezer-api
WSGIDaemonProcess freezer-api processes={{ freezer_api_workers }} threads=1 user=freezer display-name=freezer-api
WSGIProcessGroup freezer-api
WSGIApplicationGroup %{GLOBAL}
WSGIScriptAlias / {{ python_path }}/freezer_api/cmd/wsgi.py

@ -209,6 +209,8 @@ glance_logging_debug: "{{ openstack_logging_debug }}"
openstack_glance_auth: "{{ openstack_auth }}"
glance_api_workers: "{{ openstack_service_workers }}"
###################
# Kolla
###################

@ -11,7 +11,7 @@ bind_host = 127.0.0.1
bind_host = {{ api_interface_address }}
{% endif %}
bind_port = {{ glance_api_listen_port }}
workers = {{ openstack_service_workers }}
workers = {{ glance_api_workers }}
{% if glance_store_backends %}
enabled_backends = {% for key in glance_store_backends %}{{ key.name }}:{{ key.type }}{% if not loop.last %}, {% endif %}{% endfor %}

@ -152,6 +152,8 @@ gnocchi_keystone_user: "gnocchi"
openstack_gnocchi_auth: "{{ openstack_auth }}"
gnocchi_api_workers: "{{ openstack_service_workers }}"
####################
# Keystone
####################

@ -23,7 +23,7 @@ LogLevel info
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ gnocchi_log_dir }}/gnocchi-api-access.log" logformat
WSGIApplicationGroup %{GLOBAL}
WSGIDaemonProcess gnocchi group=gnocchi processes={{ openstack_service_workers }} threads=1 user=gnocchi
WSGIDaemonProcess gnocchi group=gnocchi processes={{ gnocchi_api_workers }} threads=1 user=gnocchi
WSGIProcessGroup gnocchi
WSGIScriptAlias / "{{ wsgi_path }}/gnocchi-api"

@ -166,6 +166,9 @@ heat_stack_owner_role: "heat_stack_owner"
openstack_heat_auth: "{{ openstack_auth }}"
heat_api_cfn_workers: "{{ openstack_service_workers }}"
heat_api_workers: "{{ openstack_service_workers }}"
heat_engine_workers: "{{ openstack_service_workers }}"
####################
# Kolla

@ -13,7 +13,7 @@ stack_domain_admin_password = {{ heat_domain_admin_password }}
stack_user_domain_name = heat_user_domain
{% if service_name == 'heat-engine' %}
num_engine_workers = {{ openstack_service_workers }}
num_engine_workers = {{ heat_engine_workers }}
{% endif %}
transport_url = {{ rpc_transport_url }}

@ -29,7 +29,7 @@ CustomLog "{{ heat_log_dir }}/apache-cfn-access.log" common
</IfModule>
<VirtualHost *:{{ heat_api_cfn_listen_port }}>
WSGIDaemonProcess heat-api-cfn processes={{ openstack_service_workers }} threads=1 user=heat group=heat display-name=heat-api-cfn
WSGIDaemonProcess heat-api-cfn processes={{ heat_api_cfn_workers }} threads=1 user=heat group=heat display-name=heat-api-cfn
WSGIProcessGroup heat-api-cfn
WSGIScriptAlias / {{ binary_path }}/heat-wsgi-api-cfn
WSGIApplicationGroup %{GLOBAL}

@ -29,7 +29,7 @@ CustomLog "{{ heat_log_dir }}/apache-access.log" common
</IfModule>
<VirtualHost *:{{ heat_api_listen_port }}>
WSGIDaemonProcess heat-api processes={{ openstack_service_workers }} threads=1 user=heat group=heat display-name=heat-api
WSGIDaemonProcess heat-api processes={{ heat_api_workers }} threads=1 user=heat group=heat display-name=heat-api
WSGIProcessGroup heat-api
WSGIScriptAlias / {{ binary_path }}/heat-wsgi-api
WSGIApplicationGroup %{GLOBAL}

@ -246,6 +246,7 @@ openstack_ironic_auth: "{{ openstack_auth }}"
openstack_ironic_inspector_auth: "{{ openstack_auth }}"
ironic_api_workers: "{{ openstack_service_workers }}"
#########
# Ironic

@ -32,7 +32,7 @@ LogLevel info
{% endif %}
<VirtualHost *:{{ ironic_api_listen_port }}>
WSGIDaemonProcess ironic-api processes={{ openstack_service_workers }} threads=1 user=ironic group=ironic display-name=ironic-api
WSGIDaemonProcess ironic-api processes={{ ironic_api_workers }} threads=1 user=ironic group=ironic display-name=ironic-api
WSGIProcessGroup ironic-api
WSGIScriptAlias / {{ wsgi_directory }}/ironic-api-wsgi
WSGIApplicationGroup %{GLOBAL}

@ -153,6 +153,7 @@ keystone_logging_debug: "{{ openstack_logging_debug }}"
openstack_keystone_auth: "{{ openstack_auth }}"
keystone_api_workers: "{{ openstack_service_workers }}"
####################
# Kolla

@ -39,7 +39,7 @@ LogLevel info
<VirtualHost *:{{ keystone_public_listen_port }}>
WSGIDaemonProcess keystone-public processes={{ openstack_service_workers }} threads=1 user=keystone group=keystone display-name=keystone-public
WSGIDaemonProcess keystone-public processes={{ keystone_api_workers }} threads=1 user=keystone group=keystone display-name=keystone-public
WSGIProcessGroup keystone-public
WSGIScriptAlias / {{ binary_path }}/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
@ -112,7 +112,7 @@ LogLevel info
# NOTE(yoctozepto): Admin port settings are kept only for upgrade compatibility.
# TODO(yoctozepto): Remove after Zed.
<VirtualHost *:{{ keystone_admin_listen_port }}>
WSGIDaemonProcess keystone-admin processes={{ openstack_service_workers }} threads=1 user=keystone group=keystone display-name=keystone-admin
WSGIDaemonProcess keystone-admin processes={{ keystone_api_workers }} threads=1 user=keystone group=keystone display-name=keystone-admin
WSGIProcessGroup keystone-admin
WSGIScriptAlias / {{ binary_path }}/keystone-wsgi-admin
WSGIApplicationGroup %{GLOBAL}

@ -125,6 +125,8 @@ magnum_trustee_domain: "magnum"
openstack_magnum_auth: "{{ openstack_auth }}"
magnum_api_workers: "{{ openstack_service_workers }}"
magnum_conductor_workers: "{{ openstack_service_workers }}"
####################
# Kolla

@ -11,7 +11,12 @@ transport_url = {{ rpc_transport_url }}
[api]
port = {{ magnum_api_port }}
host = {{ api_interface_address }}
workers = {{ openstack_service_workers }}
workers = {{ magnum_api_workers }}
{% endif %}
{% if service_name == 'magnum-conductor' %}
[conductor]
workers = {{ magnum_conductor_workers }}
{% endif %}
[database]

@ -188,6 +188,7 @@ manila_service_instance_password: "manila"
openstack_manila_auth: "{{ openstack_auth }}"
manila_api_workers: "{{ openstack_service_workers }}"
####################
# Manila

@ -8,7 +8,7 @@ my_ip = {{ api_interface_address }}
osapi_share_listen = {{ api_interface_address }}
osapi_share_listen_port = {{ manila_api_port }}
osapi_share_workers = {{ openstack_service_workers }}
osapi_share_workers = {{ manila_api_workers }}
rootwrap_config = /etc/manila/rootwrap.conf
api_paste_config = /etc/manila/api-paste.ini

@ -119,6 +119,7 @@ masakari_keystone_user: "masakari"
openstack_masakari_auth: "{{ openstack_auth }}"
masakari_api_workers: "{{ openstack_service_workers }}"
####################
# Kolla

@ -31,7 +31,7 @@ LogLevel info
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ masakari_log_dir }}/masakari_wsgi_access.log" logformat
WSGIApplicationGroup %{GLOBAL}
WSGIDaemonProcess masakari group=masakari processes={{ openstack_service_workers }} threads=1 user=masakari
WSGIDaemonProcess masakari group=masakari processes={{ masakari_api_workers }} threads=1 user=masakari
WSGIProcessGroup masakari
WSGIScriptAlias / "{{ binary_path }}/masakari-wsgi"
</VirtualHost>

@ -176,6 +176,8 @@ mistral_keystone_user: "mistral"
openstack_mistral_auth: "{{ openstack_auth }}"
mistral_api_workers: "{{ openstack_service_workers }}"
####################
# Kolla
####################

@ -20,7 +20,7 @@ transport_url = {{ rpc_transport_url }}
[api]
host = {{ api_interface_address }}
port = {{ mistral_api_port }}
api_workers = {{ openstack_service_workers }}
api_workers = {{ mistral_api_workers }}
{% elif service_name == 'mistral-engine' %}
[engine]
host = {{ api_interface_address }}

@ -316,6 +316,8 @@ monasca_api_public_endpoint: "{{ monasca_api_public_base_endpoint }}/v2.0"
monasca_logging_debug: "{{ openstack_logging_debug }}"
monasca_api_workers: "{{ openstack_service_workers }}"
####################
# Keystone
####################

@ -22,7 +22,7 @@ LogLevel info
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ monasca_log_dir }}/monasca-api-access.log" logformat
WSGIApplicationGroup %{GLOBAL}
WSGIDaemonProcess monasca-api group=monasca processes={{ openstack_service_workers }} threads=1 user=monasca
WSGIDaemonProcess monasca-api group=monasca processes={{ monasca_api_workers }} threads=1 user=monasca
WSGIProcessGroup monasca-api
WSGIScriptAlias / {{ wsgi_path }}/wsgi.py
WSGIPassAuthorization On

@ -80,6 +80,9 @@ openstack_murano_auth: "{{ openstack_auth }}"
murano_agent_timeout: 3600
murano_engine_workers: "{{ openstack_service_workers }}"
murano_api_workers: "{{ openstack_service_workers }}"
####################
# Kolla
####################

@ -11,7 +11,7 @@ bind_port = {{ murano_api_port }}
transport_url = {{ rpc_transport_url }}
[engine]
engine_workers = {{ openstack_service_workers }}
engine_workers = {{ murano_engine_workers }}
agent_timeout = {{ murano_agent_timeout }}
[database]
@ -50,7 +50,7 @@ cafile = {{ openstack_cacert }}
[murano]
url = {{ murano_internal_endpoint }}
api_workers = {{ openstack_service_workers }}
api_workers = {{ murano_api_workers }}
[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}

@ -572,6 +572,9 @@ neutron_l3_agent_host_ipv6_neigh_gc_thresh1: "{{ neutron_l3_agent_host_ipv4_neig
neutron_l3_agent_host_ipv6_neigh_gc_thresh2: "{{ neutron_l3_agent_host_ipv4_neigh_gc_thresh2 }}"
neutron_l3_agent_host_ipv6_neigh_gc_thresh3: "{{ neutron_l3_agent_host_ipv4_neigh_gc_thresh3 }}"
neutron_api_workers: "{{ openstack_service_workers }}"
neutron_metadata_workers: "{{ openstack_service_workers }}"
####################
# Subprojects
####################

@ -17,8 +17,8 @@ bind_port = {{ neutron_server_listen_port }}
api_paste_config = /etc/neutron/api-paste.ini
api_workers = {{ openstack_service_workers }}
metadata_workers = {{ openstack_service_workers }}
api_workers = {{ neutron_api_workers }}
metadata_workers = {{ neutron_metadata_workers }}
rpc_workers = {{ openstack_service_rpc_workers }}
rpc_state_report_workers = {{ openstack_service_rpc_workers }}

@ -493,6 +493,8 @@ nova_cell_conductor_has_api_database: "yes"
# register itself.
nova_compute_registration_fatal: false
nova_cell_conductor_workers: "{{ openstack_service_workers }}"
####################
# Notification
####################

@ -37,7 +37,7 @@ compute_monitors=nova.compute.monitors.cpu.virt_driver
transport_url = {{ nova_cell_rpc_transport_url }}
[conductor]
workers = {{ openstack_service_workers }}
workers = {{ nova_cell_conductor_workers }}
{% if nova_console == 'novnc' %}
[vnc]

@ -188,6 +188,11 @@ nova_services_require_policy_json:
nova_enable_external_metadata: "no"
nova_api_workers: "{{ openstack_service_workers }}"
nova_superconductor_workers: "{{ openstack_service_workers }}"
nova_metadata_api_workers: "{{ openstack_service_workers }}"
nova_scheduler_workers: "{{ openstack_service_workers }}"
####################
# Keystone
####################

@ -33,7 +33,7 @@ LogLevel info
{% endif %}
<VirtualHost *:{{ nova_api_listen_port }}>
WSGIDaemonProcess nova-api processes={{ openstack_service_workers }} threads=1 user=nova group=nova display-name=nova-api
WSGIDaemonProcess nova-api processes={{ nova_api_workers }} threads=1 user=nova group=nova display-name=nova-api
WSGIProcessGroup nova-api
WSGIScriptAlias / {{ wsgi_directory }}/nova-api-wsgi
WSGIApplicationGroup %{GLOBAL}
@ -52,7 +52,7 @@ LogLevel info
</VirtualHost>
<VirtualHost *:{{ nova_metadata_listen_port }}>
WSGIDaemonProcess nova-metadata processes={{ openstack_service_workers }} threads=1 user=nova group=nova display-name=nova-metadata-api
WSGIDaemonProcess nova-metadata processes={{ nova_metadata_api_workers }} threads=1 user=nova group=nova display-name=nova-metadata-api
WSGIProcessGroup nova-metadata
WSGIScriptAlias / {{ wsgi_directory }}/nova-metadata-wsgi
WSGIApplicationGroup %{GLOBAL}

@ -47,7 +47,7 @@ vendordata_jsonfile_path = /etc/nova/vendordata.json
# Super conductor
[conductor]
workers = {{ openstack_service_workers }}
workers = {{ nova_superconductor_workers }}
[oslo_middleware]
enable_proxy_headers_parsing = True
@ -167,7 +167,7 @@ max_attempts = 10
# periodic run must be disabled to avoid random failures (where both try to map)
# -1 is default and means periodic discovery is disabled
discover_hosts_in_cells_interval = -1
workers = {{ openstack_service_workers }}
workers = {{ nova_scheduler_workers }}
[placement]
auth_type = password

@ -207,6 +207,10 @@ octavia_service_auth_project: "service"
openstack_octavia_auth: "{{ openstack_auth }}"
octavia_api_workers: "{{ openstack_service_workers }}"
octavia_healthmanager_health_workers: "{{ openstack_service_workers }}"
octavia_healthmanager_stats_workers: "{{ openstack_service_workers }}"
####################
# Keystone
####################

@ -19,7 +19,7 @@ LogLevel info
{% endif %}
<VirtualHost *:{{ octavia_api_listen_port }}>
WSGIDaemonProcess octavia-api processes={{ openstack_service_workers }} threads=1 user=octavia group=octavia display-name=octavia-api
WSGIDaemonProcess octavia-api processes={{ octavia_api_workers }} threads=1 user=octavia group=octavia display-name=octavia-api
WSGIProcessGroup octavia-api
WSGIScriptAlias / {{ wsgi_directory }}/octavia-wsgi
WSGIApplicationGroup %{GLOBAL}

@ -80,8 +80,8 @@ bind_ip = {{ octavia_network_interface_address }}
heartbeat_key = insecure
controller_ip_port_list = {% for host in groups['octavia-health-manager'] %}{{ 'octavia_network' | kolla_address(host) | put_address_in_context('url') }}:{{ octavia_health_manager_port }}{% if not loop.last %},{% endif %}{% endfor %}
stats_update_threads = {{ openstack_service_workers }}
health_update_threads = {{ openstack_service_workers }}
stats_update_threads = {{ octavia_healthmanager_stats_workers }}
health_update_threads = {{ octavia_healthmanager_health_workers }}
[controller_worker]
amp_ssh_key_name = {{ octavia_amp_ssh_key_name }}

@ -74,6 +74,7 @@ placement_logging_debug: "{{ openstack_logging_debug }}"
openstack_placement_auth: "{{ openstack_auth }}"
placement_api_workers: "{{ openstack_service_workers }}"
####################
# Notification

@ -20,7 +20,7 @@ LogLevel info
{% endif %}
<VirtualHost *:{{ placement_api_listen_port }}>
WSGIDaemonProcess placement-api processes={{ openstack_service_workers }} threads=1 user=placement group=placement display-name=placement-api
WSGIDaemonProcess placement-api processes={{ placement_api_workers }} threads=1 user=placement group=placement display-name=placement-api
WSGIProcessGroup placement-api
WSGIScriptAlias / {{ wsgi_directory }}/placement-api
WSGIApplicationGroup %{GLOBAL}

@ -112,6 +112,7 @@ sahara_keystone_user: "sahara"
openstack_sahara_auth: "{{ openstack_auth }}"
sahara_api_workers: "{{ openstack_service_workers }}"
####################
## Kolla

@ -5,7 +5,7 @@ port = {{ sahara_api_port }}
host = {{ api_interface_address }}
transport_url = {{ rpc_transport_url }}
api_workers = {{ openstack_service_workers }}
api_workers = {{ sahara_api_workers }}
use_floating_ips = False
use_namespaces = True
use_rootwrap = True

@ -175,6 +175,10 @@ senlin_keystone_user: "senlin"
openstack_senlin_auth: "{{ openstack_auth }}"
senlin_api_workers: "{{ openstack_service_workers }}"
senlin_conductor_workers: "{{ openstack_service_workers }}"
senlin_engine_workers: "{{ openstack_service_workers }}"
senlin_health_manager_workers: "{{ openstack_service_workers }}"
####################
# Kolla

@ -9,7 +9,7 @@ transport_url = {{ rpc_transport_url }}
[senlin_api]
bind_host = {{ api_interface_address }}
bind_port = {{ senlin_api_listen_port }}
workers = {{ openstack_service_workers }}
workers = {{ senlin_api_workers }}
{% endif %}
[authentication]
@ -22,7 +22,7 @@ service_project_domain = default
{% if service_name == 'senlin-conductor' %}
[conductor]
workers = {{ openstack_service_workers }}
workers = {{ senlin_conductor_workers }}
{% endif %}
[database]
@ -33,12 +33,12 @@ max_retries = -1
{% if service_name == 'senlin-engine' %}
[engine]
workers = {{ openstack_service_workers }}
workers = {{ senlin_engine_workers }}
{% endif %}
{% if service_name == 'senlin-health-manager' %}
[health_manager]
workers = {{ openstack_service_workers }}
workers = {{ senlin_health_manager_workers }}
{% endif %}
[keystone_authtoken]

@ -190,6 +190,7 @@ solum_keystone_user: "solum"
openstack_solum_auth: "{{ openstack_auth }}"
solum_api_workers: "{{ openstack_service_workers }}"
####################
# Kolla

@ -11,7 +11,7 @@ bind_port = {{ solum_application_deployment_port }}
[api]
image_format = vm
port = {{ solum_application_deployment_port }}
workers = {{ openstack_service_workers }}
workers = {{ solum_api_workers }}
{% if service_name == 'solum-api' %}
host = {{ api_interface_address }}
{% endif %}

@ -73,6 +73,11 @@ swift_enable_rolling_upgrade: "yes"
swift_extra_ring_files: []
swift_account_workers: "{{ openstack_service_workers }}"
swift_container_workers: "{{ openstack_service_workers }}"
swift_object_workers: "{{ openstack_service_workers }}"
swift_proxy_server_workers: "{{ openstack_service_workers }}"
####################
# Keystone
####################

@ -9,7 +9,7 @@ log_udp_port = {{ syslog_udp_port }}
log_name = {{ service_name }}
log_facility = {{ syslog_swift_facility }}
log_level = {{ swift_log_level }}
workers = {{ openstack_service_workers }}
workers = {{ swift_account_workers }}
[pipeline:main]
pipeline = {% if enable_swift_recon %}recon {% endif %}account-server

@ -9,7 +9,7 @@ log_udp_port = {{ syslog_udp_port }}
log_name = {{ service_name }}
log_facility = {{ syslog_swift_facility }}
log_level = {{ swift_log_level }}
workers = {{ openstack_service_workers }}
workers = {{ swift_container_workers }}
[pipeline:main]
pipeline = {% if enable_swift_recon %}recon {% endif %}container-server

@ -10,7 +10,7 @@ log_udp_port = {{ syslog_udp_port }}
log_name = {{ service_name }}
log_facility = {{ syslog_swift_facility }}
log_level = {{ swift_log_level }}
workers = {{ openstack_service_workers }}
workers = {{ swift_object_workers }}
[pipeline:main]
{% if service_name == 'swift-object-expirer' %}

@ -7,7 +7,7 @@ log_udp_port = {{ syslog_udp_port }}
log_name = {{ service_name }}
log_facility = {{ syslog_swift_facility }}
log_level = {{ swift_log_level }}
workers = {{ openstack_service_workers }}
workers = {{ swift_proxy_server_workers }}
[pipeline:main]
pipeline = catch_errors gatekeeper healthcheck cache container_sync bulk tempurl ratelimit authtoken {% if enable_swift_s3api | bool %}s3api s3token {% endif %}keystoneauth container_quotas account_quotas slo dlo {% if enable_ceilometer | bool %}ceilometer {% endif %}proxy-server

@ -122,6 +122,8 @@ tacker_keystone_user: "tacker"
openstack_tacker_auth: "{{ openstack_auth }}"
tacker_api_workers: "{{ openstack_service_workers }}"
###################
# Kolla
####################

@ -6,7 +6,7 @@ transport_url = {{ rpc_transport_url }}
bind_host = {{ api_interface_address }}
bind_port = {{ tacker_server_port }}
api_workers = {{ openstack_service_workers }}
api_workers = {{ tacker_api_workers }}
service_plugins = nfvo,vnfm
[nfvo]

@ -144,6 +144,7 @@ trove_keystone_user: "trove"
openstack_trove_auth: "{{ openstack_auth }}"
trove_api_workers: "{{ openstack_service_workers }}"
####################
# Kolla

@ -7,7 +7,7 @@ host = {{ api_interface_address }}
bind_port = {{ trove_api_port }}
bind_host = {{ api_interface_address }}
trove_api_workers = {{ openstack_service_workers }}
trove_api_workers = {{ trove_api_workers }}
auth_strategy = keystone
transport_url = {{ rpc_transport_url }}

@ -203,6 +203,8 @@ vitrage_keystone_user: "vitrage"
openstack_vitrage_auth: "{{ openstack_auth }}"
vitrage_api_workers: "{{ openstack_service_workers }}"
#####################
# Datasources
#####################

@ -34,7 +34,7 @@ LogLevel info
ServerSignature Off
CustomLog "{{ vitrage_log_dir }}/vitrage_wsgi_access.log" combined
WSGIApplicationGroup %{GLOBAL}
WSGIDaemonProcess vitrage group=vitrage processes={{ openstack_service_workers }} threads=1 user=vitrage
WSGIDaemonProcess vitrage group=vitrage processes={{ vitrage_api_workers }} threads=1 user=vitrage
WSGIProcessGroup vitrage
WSGIScriptAlias / "/var/www/cgi-bin/vitrage/app.wsgi"
{% if enable_vitrage_prometheus_datasource | bool %}

@ -141,6 +141,7 @@ watcher_keystone_user: "watcher"
openstack_watcher_auth: "{{ openstack_auth }}"
watcher_api_workers: "{{ openstack_service_workers }}"
####################
# Kolla

@ -9,7 +9,7 @@ transport_url = {{ rpc_transport_url }}
[api]
host = {{ api_interface_address }}
port = {{ watcher_api_port }}
workers = {{ openstack_service_workers }}
workers = {{ watcher_api_workers }}
{% endif %}
[database]

@ -194,6 +194,7 @@ zun_keystone_user: "zun"
openstack_zun_auth: "{{ openstack_auth }}"
zun_api_workers: "{{ openstack_service_workers }}"
####################
# Kolla

@ -35,7 +35,7 @@ LogLevel info
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat
CustomLog "{{ zun_log_dir }}/zun_api_wsgi_access.log" logformat
WSGIApplicationGroup %{GLOBAL}
WSGIDaemonProcess zun group=zun processes={{ openstack_service_workers }} threads=1 user=zun
WSGIDaemonProcess zun group=zun processes={{ zun_api_workers }} threads=1 user=zun
WSGIProcessGroup zun
WSGIScriptAlias / "/var/www/cgi-bin/zun/app.wsgi"
</VirtualHost>