Sam Yaple 347730cec8 Remove vip for rabbitmq
Adjust all the configs to list all of the RabbitMQ hosts rather than
routing RabbitMQ traffic through the VIP. This is made possible by the
clusterer change, which has already merged.

Change-Id: I5db48f5f10ec68f4c8863a29bc13984f6845a4f9
Partially-Implements: blueprint rabbitmq-clusterer
2015-11-05 15:43:20 +00:00
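For illustration, with three controllers at 10.0.0.11, 10.0.0.12 and 10.0.0.13 (hypothetical addresses) and the default rabbitmq_port of 5672, the rabbit_hosts loop in the template below now renders to a comma-separated list of every broker rather than the single VIP:

    rabbit_hosts = 10.0.0.11:5672,10.0.0.12:5672,10.0.0.13:5672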

# nova.conf
[DEFAULT]
verbose = true
debug = true
syslog_log_facility = LOG_LOCAL0
use_syslog = yes
api_paste_config = /etc/nova/api-paste.ini
state_path = /var/lib/nova
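# Bind the compute API, metadata and EC2 services to this host's api_interface address.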
osapi_compute_listen = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
osapi_compute_listen_port = {{ nova_api_port }}
metadata_listen = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
metadata_listen_port = {{ nova_metadata_port }}
ec2_listen = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
ec2_listen_port = {{ nova_api_ec2_port }}
notification_driver = noop
security_group_api = neutron
network_api_class = nova.network.neutronv2.api.API
firewall_driver = nova.virt.firewall.NoopFirewallDriver
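# Pick the linuxnet interface driver that matches the configured Neutron agent.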
{% if neutron_plugin_agent == "openvswitch" %}
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
{% elif neutron_plugin_agent == "linuxbridge" %}
linuxnet_interface_driver = nova.network.linux_net.BridgeInterfaceDriver
{% endif %}
allow_resize_to_same_host = true
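# Build the memcached backend list from every host in the memcached group.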
memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + api_interface]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
# Although my_ip is not used directly, many other options reference $my_ip
my_ip = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
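# Console access: configure either noVNC or SPICE, depending on nova_console.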
{% if nova_console == 'novnc' %}
[vnc]
vncserver_listen = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
vncserver_proxyclient_address = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
{% if inventory_hostname in groups['compute'] %}
novncproxy_base_url = http://{{ kolla_internal_address }}:{{ nova_novncproxy_port }}/vnc_auto.html
{% endif %}
novncproxy_host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
novncproxy_port = {{ nova_novncproxy_port }}
{% elif nova_console == 'spice' %}
[vnc]
# We have to turn off vnc to use spice
enabled = false
[spice]
server_listen = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
server_proxyclient_address = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
{% if inventory_hostname in groups['compute'] %}
html5proxy_base_url = http://{{ kolla_internal_address }}:{{ nova_spicehtml5proxy_port }}/spice_auto.html
{% endif %}
html5proxy_host = {{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}
html5proxy_port = {{ nova_spicehtml5proxy_port }}
{% endif %}
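# With Ironic enabled, nova-compute uses the Ironic driver and bare-metal scheduling;
# otherwise instances run under libvirt.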
{% if enable_ironic | bool %}
compute_driver = nova.virt.ironic.IronicDriver
scheduler_host_manager = nova.scheduler.ironic_host_manager.IronicHostManager
ram_allocation_ratio = 1.0
reserved_host_memory_mb = 0
compute_manager = ironic.nova.compute.manager.ClusteredComputeManager
scheduler_use_baremetal_filters = True
{% else %}
compute_driver = libvirt.LibvirtDriver
{% endif %}
{% if enable_ironic | bool %}
[ironic]
# TODO: remember to update this once discoverd is replaced by inspector
admin_username = {{ ironic_keystone_user }}
admin_password = {{ ironic_keystone_password }}
admin_url = {{ openstack_auth.auth_url }}
admin_tenant_name = {{ openstack_auth.project_name }}
api_endpoint = http://{{ kolla_internal_address }}:{{ ironic_api_port }}/v1
{% endif %}
[oslo_messaging_rabbit]
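# List every RabbitMQ cluster member directly instead of going through the VIP
# (see the rabbitmq-clusterer blueprint); rabbit_ha_queues enables mirrored (HA) queues.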
rabbit_userid = {{ rabbitmq_user }}
rabbit_password = {{ rabbitmq_password }}
rabbit_ha_queues = true
rabbit_hosts = {% for host in groups['rabbitmq'] %}{{ hostvars[host]['ansible_' + api_interface]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[glance]
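# With Ceph, Glance is reached through the internal VIP; otherwise every glance-api
# backend is listed directly and num_retries is sized to the number of backends.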
{% if enable_ceph | bool %}
host = {{ kolla_internal_address }}
port = {{ glance_api_port }}
{% else %}
api_servers = {% for host in groups['glance-api'] %}{{ hostvars[host]['ansible_' + api_interface]['ipv4']['address'] }}:{{ glance_api_port }}{% if not loop.last %},{% endif %}{% endfor %}
num_retries = {{ groups['glance-api'] | length }}
{% endif %}
[cinder]
catalog_info = volume:cinder:internalURL
[neutron]
url = http://{{ kolla_internal_address }}:{{ neutron_server_port }}
auth_strategy = keystone
metadata_proxy_shared_secret = {{ metadata_secret }}
service_metadata_proxy = true
auth_url = http://{{ kolla_internal_address }}:{{ keystone_admin_port }}
auth_plugin = password
project_domain_name = default
user_domain_id = default
project_name = service
username = neutron
password = {{ neutron_keystone_password }}
[database]
connection = mysql://{{ nova_database_user }}:{{ nova_database_password }}@{{ nova_database_address }}/{{ nova_database_name }}
[keystone_authtoken]
auth_uri = http://{{ kolla_internal_address }}:{{ keystone_public_port }}
auth_url = http://{{ kolla_internal_address }}:{{ keystone_admin_port }}
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = {{ nova_keystone_password }}
{% if enable_ceph | bool %}
[libvirt]
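# Ceph-backed ephemeral storage: instance disks are RBD images with writeback
# caching, tunnelled live migration and discard (unmap) support.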
images_type = rbd
images_rbd_pool = {{ ceph_nova_pool_name }}
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = nova
rbd_secret_uuid = {{ rbd_secret_uuid }}
disk_cachemodes="network=writeback"
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
hw_disk_discard = unmap
{% endif %}