From cacf08f0a6d009301f28c6723f399fb8c1daf267 Mon Sep 17 00:00:00 2001
From: Jeffrey Zhang <zhang.lei.fly@gmail.com>
Date: Wed, 12 Jul 2017 16:38:11 +0800
Subject: [PATCH] Remove all kolla-kubernetes configurations

kolla-kubernetes is using its own configuration generation[0], so it is
time for kolla-ansible to remove the related code to simplify the
logic.

[0] https://github.com/openstack/kolla-kubernetes/tree/master/ansible

Change-Id: I7bb0b7fe3b8eea906613e936d5e9d19f4f2e80bb
Implements: blueprint clean-k8s-config
---
 ansible/group_vars/all.yml                    | 19 +++-------
 .../roles/barbican/templates/barbican.conf.j2 |  4 --
 ansible/roles/ceph/templates/ceph-mon.json.j2 |  4 --
 ansible/roles/ceph/templates/ceph-osd.json.j2 |  4 --
 ansible/roles/ceph/templates/ceph.conf.j2     |  4 --
 ansible/roles/cinder/templates/cinder.conf.j2 | 13 ++-----
 .../templates/conf/input/01-syslog.conf.j2    |  4 --
 .../templates/elasticsearch.yml.j2            |  6 +--
 .../roles/glance/templates/glance-api.conf.j2 | 19 +---------
 .../glance/templates/glance-registry.conf.j2  | 15 +-------
 .../roles/haproxy/templates/haproxy.cfg.j2    |  2 -
 ansible/roles/heat/templates/heat.conf.j2     | 19 ++--------
 .../roles/horizon/templates/local_settings.j2 |  7 +---
 ansible/roles/ironic/tasks/config.yml         |  1 -
 ansible/roles/ironic/tasks/precheck.yml       |  1 -
 .../ironic/templates/ironic-dnsmasq.conf.j2   |  7 ----
 .../ironic/templates/ironic-inspector.conf.j2 | 21 -----------
 .../roles/ironic/templates/ironic-pxe.json.j2 |  2 -
 ansible/roles/ironic/templates/ironic.conf.j2 | 19 +---------
 .../ironic/templates/pxelinux.default.j2      |  4 --
 ansible/roles/karbor/templates/karbor.conf.j2 |  4 +-
 .../roles/keystone/templates/keystone.conf.j2 | 12 +-----
 ansible/roles/kibana/templates/kibana.yml.j2  |  2 +-
 ansible/roles/mariadb/templates/galera.cnf.j2 |  4 --
 ansible/roles/neutron/defaults/main.yml       | 15 +++-----
 .../neutron/templates/metadata_agent.ini.j2   |  2 +-
 .../roles/neutron/templates/neutron.conf.j2   |  8 +---
 ansible/roles/nova/defaults/main.yml          |  2 +-
 ansible/roles/nova/templates/nova.conf.j2     | 37 +++----------------
 ansible/roles/openvswitch/defaults/main.yml   |  2 -
 .../templates/openvswitch-db-server.json.j2   |  2 +-
 .../rabbitmq/templates/rabbitmq-env.conf.j2   |  2 -
 .../rabbitmq/templates/rabbitmq.config.j2     |  6 +--
 ansible/roles/sahara/templates/sahara.conf.j2 |  4 --
 .../searchlight/templates/searchlight.conf.j2 |  6 +--
 ansible/roles/solum/templates/solum.conf.j2   |  2 +-
 ansible/roles/zun/templates/zun.conf.j2       |  2 +-
 37 files changed, 46 insertions(+), 241 deletions(-)

diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index 8d7dff552f..3e3124c18f 100644
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -19,10 +19,6 @@ node_config_directory: "/etc/kolla/{{ project }}"
 ###################
 # Kolla options
 ###################
-
-# Which orchestration engine to use. Valid options are [ ANSIBLE, KUBERNETES ]
-orchestration_engine: "ANSIBLE"
-
 # Valid options are [ COPY_ONCE, COPY_ALWAYS ]
 config_strategy: "COPY_ALWAYS"
 
@@ -53,13 +49,8 @@ kolla_dev_mode: "no"
 # kolla-kubernetes
 ####################
 # By default, Kolla API services bind to the network address assigned
-# to the api_interface.  Allow the bind address to be an override.  In
-# some cases (Kubernetes), the api_interface address is not known
-# until container runtime, and thus it is necessary to bind to all
-# interfaces "0.0.0.0".  When used outside of Kubernetes, binding to
-# all interfaces may present a security issue, and thus is not
-# recommended.
-api_interface_address:  "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] if orchestration_engine == 'ANSIBLE' else '0.0.0.0' }}"
+# to the api_interface.  Allow the bind address to be an override.
+api_interface_address:  "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
 
 ################
 # Chrony options
@@ -129,7 +120,7 @@ cluster_interface: "{{ network_interface }}"
 tunnel_interface: "{{ network_interface }}"
 bifrost_network_interface: "{{ network_interface }}"
 dns_interface: "{{ network_interface }}"
-tunnel_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + tunnel_interface]['ipv4']['address'] if orchestration_engine == 'ANSIBLE' else '0.0.0.0' }}"
+tunnel_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + tunnel_interface]['ipv4']['address'] }}"
 
 # Valid options are [ openvswitch, linuxbridge, sfc ]
 neutron_plugin_agent: "openvswitch"
@@ -286,8 +277,8 @@ openstack_region_name: "RegionOne"
 multiple_regions_names:
     - "{{ openstack_region_name }}"
 
-openstack_service_workers: "{{ [ansible_processor_vcpus, 5]|min if orchestration_engine == 'ANSIBLE' else '1'}}"
-openstack_service_rpc_workers: "{{ [ansible_processor_vcpus, 3]|min if orchestration_engine == 'ANSIBLE' else '1'}}"
+openstack_service_workers: "{{ [ansible_processor_vcpus, 5]|min }}"
+openstack_service_rpc_workers: "{{ [ansible_processor_vcpus, 3]|min }}"
 
 # Optionally allow Kolla to set sysctl values
 set_sysctl: "yes"
diff --git a/ansible/roles/barbican/templates/barbican.conf.j2 b/ansible/roles/barbican/templates/barbican.conf.j2
index 33867fd14a..cd970bd7ba 100644
--- a/ansible/roles/barbican/templates/barbican.conf.j2
+++ b/ansible/roles/barbican/templates/barbican.conf.j2
@@ -59,11 +59,7 @@ auth_type = password
 
 memcache_security_strategy = ENCRYPT
 memcache_secret_key = {{ memcache_secret_key }}
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
 memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{% endif %}
 
 [service_credentials]
 auth_url = {{ internal_protocol  }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
diff --git a/ansible/roles/ceph/templates/ceph-mon.json.j2 b/ansible/roles/ceph/templates/ceph-mon.json.j2
index 11dfccf6e3..7ad4b19bac 100644
--- a/ansible/roles/ceph/templates/ceph-mon.json.j2
+++ b/ansible/roles/ceph/templates/ceph-mon.json.j2
@@ -1,9 +1,5 @@
 {
-{%- if orchestration_engine == 'KUBERNETES' %}
-    "command": "/usr/bin/ceph-mon -f -i @MONID@ --public-addr @MONADDR@:6789",
-{%- else %}
     "command": "/usr/bin/ceph-mon -f -i {{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }} --public-addr {{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}:6789",
-{%- endif %}
     "config_files": [
         {
             "source": "{{ container_config_directory }}/ceph.conf",
diff --git a/ansible/roles/ceph/templates/ceph-osd.json.j2 b/ansible/roles/ceph/templates/ceph-osd.json.j2
index 7118f1b160..043a65ea14 100644
--- a/ansible/roles/ceph/templates/ceph-osd.json.j2
+++ b/ansible/roles/ceph/templates/ceph-osd.json.j2
@@ -1,9 +1,5 @@
 {
-{%- if orchestration_engine == 'KUBERNETES' %}
-    "command": "/usr/bin/ceph-osd -f --public-addr @HOSTADDR@ --cluster-addr @CLUSTERADDR@",
-{%- else %}
     "command": "/usr/bin/ceph-osd -f --public-addr {{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }} --cluster-addr {{ hostvars[inventory_hostname]['ansible_' + cluster_interface]['ipv4']['address'] }}",
-{%- endif %}
     "config_files": [
         {
             "source": "{{ container_config_directory }}/ceph.conf",
diff --git a/ansible/roles/ceph/templates/ceph.conf.j2 b/ansible/roles/ceph/templates/ceph.conf.j2
index af87282d0a..fa5222fc24 100644
--- a/ansible/roles/ceph/templates/ceph.conf.j2
+++ b/ansible/roles/ceph/templates/ceph.conf.j2
@@ -6,13 +6,11 @@ log to stderr = false
 err to stderr = false
 
 fsid = {{ ceph_cluster_fsid }}
-{% if orchestration_engine != 'KUBERNETES' %}
 mon initial members = {% for host in groups['ceph-mon'] %}{{ hostvars[host]['ansible_' + hostvars[host]['storage_interface']]['ipv4']['address'] }}{% if not loop.last %}, {% endif %}{% endfor %}
 
 mon host = {% for host in groups['ceph-mon'] %}{{ hostvars[host]['ansible_' + hostvars[host]['storage_interface']]['ipv4']['address'] }}{% if not loop.last %}, {% endif %}{% endfor %}
 
 mon addr = {% for host in groups['ceph-mon'] %}{{ hostvars[host]['ansible_' + hostvars[host]['storage_interface']]['ipv4']['address'] }}:6789{% if not loop.last %}, {% endif %}{% endfor %}
-{% endif %}
 
 auth cluster required = cephx
 auth service required = cephx
@@ -32,10 +30,8 @@ mon cluster log file = /var/log/kolla/ceph/$cluster.log
 
 {% if service_name is defined and service_name == 'ceph-rgw' %}
 [client.radosgw.gateway]
-{% if orchestration_engine != 'KUBERNETES' %}
 host = {{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}
 rgw frontends = civetweb port={{ api_interface_address }}:{{ rgw_port }}
-{% endif %}
 {% if enable_ceph_rgw_keystone | bool %}
 rgw_keystone_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
 rgw_keystone_admin_user = {{ openstack_auth.username }}
diff --git a/ansible/roles/cinder/templates/cinder.conf.j2 b/ansible/roles/cinder/templates/cinder.conf.j2
index 64a12ddd52..93515ed2ea 100644
--- a/ansible/roles/cinder/templates/cinder.conf.j2
+++ b/ansible/roles/cinder/templates/cinder.conf.j2
@@ -11,7 +11,7 @@ use_stderr = False
 osapi_volume_workers = {{ openstack_service_workers }}
 volume_name_template = volume-%s
 
-glance_api_servers = {{ internal_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}glance-api{% else %}{{ kolla_internal_vip_address }}{% endif %}:{{ glance_api_port }}
+glance_api_servers = {{ internal_protocol }}://{{ kolla_internal_vip_address }}:{{ glance_api_port }}
 
 glance_num_retries = {{ groups['glance-api'] | length }}
 
@@ -54,7 +54,7 @@ api_paste_config = /etc/cinder/api-paste.ini
 
 auth_strategy = keystone
 
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
+transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 [oslo_messaging_notifications]
 {% if enable_ceilometer | bool or enable_searchlight | bool %}
@@ -70,17 +70,12 @@ interface = internal
 token_auth_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
 
 [database]
-connection = mysql+pymysql://{{ cinder_database_user }}:{{ cinder_database_password }}@{% if orchestration_engine == 'KUBERNETES' %}{{ cinder_database_address }}{% else %}{{ cinder_database_address }}{% endif %}/{{ cinder_database_name }}
+connection = mysql+pymysql://{{ cinder_database_user }}:{{ cinder_database_password }}@{{ cinder_database_address }}/{{ cinder_database_name }}
 max_retries = -1
 
 [keystone_authtoken]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_admin_url }}
-{% else %}
 auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
 auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
 auth_type = password
 project_domain_id = {{ default_project_domain_id }}
 user_domain_id = {{ default_user_domain_id }}
@@ -90,7 +85,7 @@ password = {{ cinder_keystone_password }}
 
 memcache_security_strategy = ENCRYPT
 memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
+memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 
 [oslo_concurrency]
diff --git a/ansible/roles/common/templates/conf/input/01-syslog.conf.j2 b/ansible/roles/common/templates/conf/input/01-syslog.conf.j2
index 249b57e91b..f01f74c5fe 100644
--- a/ansible/roles/common/templates/conf/input/01-syslog.conf.j2
+++ b/ansible/roles/common/templates/conf/input/01-syslog.conf.j2
@@ -1,11 +1,7 @@
 <source>
   @type syslog
   port {{ fluentd_syslog_port }}
-{% if orchestration_engine != 'KUBERNETES' %}
   bind {{ api_interface_address }}
-{% else %}
-  bind 0.0.0.0
-{% endif %}
   tag syslog
   format /^(?<Payload>.*)$/
 </source>
diff --git a/ansible/roles/elasticsearch/templates/elasticsearch.yml.j2 b/ansible/roles/elasticsearch/templates/elasticsearch.yml.j2
index d4e8fbc40d..de3fcda3ad 100644
--- a/ansible/roles/elasticsearch/templates/elasticsearch.yml.j2
+++ b/ansible/roles/elasticsearch/templates/elasticsearch.yml.j2
@@ -2,12 +2,12 @@
 {% set minimum_master_nodes = (num_nodes / 2 + 1) | round(0, 'floor') | int if num_nodes > 2 else 1 %}
 {% set recover_after_nodes = (num_nodes * 2 / 3) | round(0, 'floor') | int if num_nodes > 1 else 1 %}
 node.name: "{{ api_interface_address }}"
-network.host: {% if orchestration_engine == 'KUBERNETES' %}_eth0:ipv4_{% else %}"{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"{% endif %}
+network.host: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
 
-cluster.name: "{% if orchestration_engine == 'KUBERNETES' %}0.0.0.0{% else %}{{ elasticsearch_cluster_name }}{% endif %}"
+cluster.name: "{{ elasticsearch_cluster_name }}"
 node.master: true
 node.data: true
-discovery.zen.ping.unicast.hosts: [{% if orchestration_engine == 'KUBERNETES' %}"0.0.0.0"{% else %}{% for host in groups['elasticsearch'] %}"{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}"{% if not loop.last %},{% endif %}{% endfor %}{% endif %}]
+discovery.zen.ping.unicast.hosts: [{% for host in groups['elasticsearch'] %}"{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}"{% if not loop.last %},{% endif %}{% endfor %}]
 
 discovery.zen.minimum_master_nodes: {{ minimum_master_nodes }}
 gateway.expected_nodes: {{ num_nodes }}
diff --git a/ansible/roles/glance/templates/glance-api.conf.j2 b/ansible/roles/glance/templates/glance-api.conf.j2
index de565d7898..f4a803128c 100644
--- a/ansible/roles/glance/templates/glance-api.conf.j2
+++ b/ansible/roles/glance/templates/glance-api.conf.j2
@@ -9,11 +9,7 @@ bind_host = {{ api_interface_address }}
 bind_port = {{ glance_api_port }}
 workers = {{ openstack_service_workers }}
 
-{% if orchestration_engine == 'KUBERNETES' %}
-registry_host = {{ glance_registry_host }}
-{% else %}
 registry_host = {{ kolla_internal_fqdn }}
-{% endif %}
 
 {% if enable_ceph | bool %}
 show_image_direct_url= True
@@ -21,20 +17,15 @@ show_image_direct_url= True
 
 cinder_catalog_info = volume:cinder:internalURL
 
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
+transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 [database]
 connection = mysql+pymysql://{{ glance_database_user }}:{{ glance_database_password }}@{{ glance_database_address }}/{{ glance_database_name }}
 max_retries = -1
 
 [keystone_authtoken]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_admin_url }}
-{% else %}
 auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
 auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
 auth_type = password
 project_domain_id = {{ default_project_domain_id }}
 user_domain_id = {{ default_user_domain_id }}
@@ -42,17 +33,9 @@ project_name = service
 username = {{ glance_keystone_user }}
 password = {{ glance_keystone_password }}
 
-{# For Kolla-Ansible, generate the memcache servers based on the list of
-memcached servers in the inventory and memcached_servers should be un-set.
-For Kolla-Kubernetes,  it is necessary to define the memcached_servers
-variable in globals.yml to set it to the Kubernetes service for memcached. #}
 memcache_security_strategy = ENCRYPT
 memcache_secret_key = {{ memcache_secret_key }}
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
 memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{%- endif %}
 
 [paste_deploy]
 flavor = keystone
diff --git a/ansible/roles/glance/templates/glance-registry.conf.j2 b/ansible/roles/glance/templates/glance-registry.conf.j2
index 1e90941850..eaa9d1a398 100644
--- a/ansible/roles/glance/templates/glance-registry.conf.j2
+++ b/ansible/roles/glance/templates/glance-registry.conf.j2
@@ -8,20 +8,15 @@ bind_host = {{ api_interface_address }}
 bind_port = {{ glance_registry_port }}
 workers = {{ openstack_service_workers }}
 
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
+transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 [database]
 connection = mysql+pymysql://{{ glance_database_user }}:{{ glance_database_password }}@{{ glance_database_address }}/{{ glance_database_name }}
 max_retries = -1
 
 [keystone_authtoken]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_admin_url }}
-{% else %}
 auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
 auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
 auth_type = password
 project_domain_id = {{ default_project_domain_id }}
 user_domain_id = {{ default_user_domain_id }}
@@ -29,17 +24,9 @@ project_name = service
 username = {{ glance_keystone_user }}
 password = {{ glance_keystone_password }}
 
-{# For Kolla-Ansible, generate the memcache servers based on the list of
-memcached servers in the inventory and memcached_servers should be un-set.
-For Kolla-Kubernetes,  it is necessary to define the memcached_servers
-variable in globals.yml to set it to the Kubernetes service for memcached. #}
 memcache_security_strategy = ENCRYPT
 memcache_secret_key = {{ memcache_secret_key }}
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
 memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{%- endif %}
 
 [paste_deploy]
 flavor = keystone
diff --git a/ansible/roles/haproxy/templates/haproxy.cfg.j2 b/ansible/roles/haproxy/templates/haproxy.cfg.j2
index ab022f3895..7d63415fb8 100644
--- a/ansible/roles/haproxy/templates/haproxy.cfg.j2
+++ b/ansible/roles/haproxy/templates/haproxy.cfg.j2
@@ -4,9 +4,7 @@ global
   user haproxy
   group haproxy
   daemon
-{% if orchestration_engine != 'KUBERNETES' %}
   log {{ api_interface_address }}:{{ fluentd_syslog_port }} local1
-{% endif %}
   maxconn 4000
   stats socket /var/lib/kolla/haproxy/haproxy.sock
 {% if kolla_enable_tls_external | bool %}
diff --git a/ansible/roles/heat/templates/heat.conf.j2 b/ansible/roles/heat/templates/heat.conf.j2
index c58b0a7a3d..334717c752 100644
--- a/ansible/roles/heat/templates/heat.conf.j2
+++ b/ansible/roles/heat/templates/heat.conf.j2
@@ -20,7 +20,7 @@ trusts_delegated_roles = heat_stack_owner
 num_engine_workers = {{ openstack_service_workers }}
 {% endif %}
 
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
+transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 {% if service_name == 'heat-api' %}
 [heat_api]
@@ -52,41 +52,28 @@ password = {{ heat_keystone_password }}
 
 memcache_security_strategy = ENCRYPT
 memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
+memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 
 [cache]
 backend = oslo_cache.memcache_pool
 enabled = True
-memcache_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
+memcache_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 
 [trustee]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_admin_url }}
-{% else %}
 auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
 auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
 auth_type = password
 user_domain_id = {{ default_user_domain_id }}
 username = {{ heat_keystone_user }}
 password = {{ heat_keystone_password }}
 
 [ec2authtoken]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}/v3
-{% else %}
 auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3
-{% endif %}
 
 [clients_keystone]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-{% else %}
 auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
-{% endif %}
 
 [oslo_messaging_notifications]
 {% if enable_ceilometer | bool %}
diff --git a/ansible/roles/horizon/templates/local_settings.j2 b/ansible/roles/horizon/templates/local_settings.j2
index e739b991a7..d6b9345883 100644
--- a/ansible/roles/horizon/templates/local_settings.j2
+++ b/ansible/roles/horizon/templates/local_settings.j2
@@ -163,12 +163,7 @@ SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
 CACHES = {
     'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
-
-{% if orchestration_engine == 'KUBERNETES' %}
-        'LOCATION': '{{ memcached_servers }}'
-{% else %}
         'LOCATION': [{% for host in groups['memcached'] %}'{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}'{% if not loop.last %},{% endif %}{% endfor %}]
-{%- endif %}
     }
 }
 {% endif %}
@@ -193,7 +188,7 @@ AVAILABLE_REGIONS = [
 ]
 {% endif %}
 
-OPENSTACK_HOST = "{% if orchestration_engine == 'KUBERNETES' %}{{ api_interface_address }}{% else %}{{ kolla_internal_fqdn }}{% endif %}"
+OPENSTACK_HOST = "{{ kolla_internal_fqdn }}"
 
 OPENSTACK_KEYSTONE_URL = "{{ keystone_internal_url }}"
 OPENSTACK_KEYSTONE_DEFAULT_ROLE = "{{ keystone_default_user_role }}"
diff --git a/ansible/roles/ironic/tasks/config.yml b/ansible/roles/ironic/tasks/config.yml
index b09fcf742b..9907fd5a50 100644
--- a/ansible/roles/ironic/tasks/config.yml
+++ b/ansible/roles/ironic/tasks/config.yml
@@ -79,7 +79,6 @@
   with_items:
     - "ironic-agent.kernel"
     - "ironic-agent.initramfs"
-  when: orchestration_engine != 'KUBERNETES'
 
 - name: Check if policies shall be overwritten
   local_action: stat path="{{ node_custom_config }}/ironic/policy.json"
diff --git a/ansible/roles/ironic/tasks/precheck.yml b/ansible/roles/ironic/tasks/precheck.yml
index 0538436a6e..5403e918af 100644
--- a/ansible/roles/ironic/tasks/precheck.yml
+++ b/ansible/roles/ironic/tasks/precheck.yml
@@ -35,7 +35,6 @@
   failed_when: not result.stat.exists
   when:
     - inventory_hostname in groups['ironic-pxe']
-    - orchestration_engine != 'KUBERNETES'
   with_items:
     - "ironic-agent.kernel"
     - "ironic-agent.initramfs"
diff --git a/ansible/roles/ironic/templates/ironic-dnsmasq.conf.j2 b/ansible/roles/ironic/templates/ironic-dnsmasq.conf.j2
index d121761361..5d339e59b5 100644
--- a/ansible/roles/ironic/templates/ironic-dnsmasq.conf.j2
+++ b/ansible/roles/ironic/templates/ironic-dnsmasq.conf.j2
@@ -1,15 +1,8 @@
 port=0
-{% if orchestration_engine != 'KUBERNETES' %}
 interface={{ api_interface }}
 dhcp-range={{ ironic_dnsmasq_dhcp_range }}
 dhcp-option=option:tftp-server,{{ kolla_internal_vip_address }}
 dhcp-option=option:server-ip-address,{{ kolla_internal_vip_address }}
-{% else %}
-interface=undefined
-dhcp-range=
-dhcp-option=option:tftp-server,undefined
-dhcp-option=option:server-ip-address,undefined
-{% endif %}
 bind-interfaces
 dhcp-sequential-ip
 dhcp-option=option:bootfile-name,pxelinux.0
diff --git a/ansible/roles/ironic/templates/ironic-inspector.conf.j2 b/ansible/roles/ironic/templates/ironic-inspector.conf.j2
index 5b93100fe6..e38ce7a4f3 100644
--- a/ansible/roles/ironic/templates/ironic-inspector.conf.j2
+++ b/ansible/roles/ironic/templates/ironic-inspector.conf.j2
@@ -2,19 +2,11 @@
 debug = {{ ironic_logging_debug }}
 log_dir = /var/log/kolla/ironic-inspector
 
-{% if orchestration_engine != 'KUBERNETES' %}
 listen_address = {{ api_interface_address }}
-{% else %}
-listen_address = 0.0.0.0
-{% endif %}
 listen_port = {{ ironic_inspector_port }}
 
 [ironic]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_url = {{ keystone_admin_url }}
-{% else %}
 auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
 auth_type = password
 project_domain_id = {{ default_project_domain_id }}
 user_domain_id = {{ default_user_domain_id }}
@@ -23,13 +15,8 @@ username = {{ ironic_inspector_keystone_user }}
 password = {{ ironic_inspector_keystone_password }}
 
 [keystone_authtoken]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_admin_url }}
-{% else %}
 auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
 auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
 auth_type = password
 project_domain_id = {{ default_project_domain_id }}
 user_domain_id = {{ default_user_domain_id }}
@@ -39,18 +26,10 @@ password = {{ ironic_inspector_keystone_password }}
 
 memcache_security_strategy = ENCRYPT
 memcache_secret_key = {{ memcache_secret_key }}
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
 memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{%- endif %}
 
 [firewall]
-{% if orchestration_engine == 'KUBERNETES' %}
-dnsmasq_interface = undefined
-{% else %}
 dnsmasq_interface = {{ ironic_dnsmasq_interface }}
-{%- endif %}
 
 [database]
 connection = mysql+pymysql://{{ ironic_inspector_database_user }}:{{ ironic_inspector_database_password }}@{{ ironic_inspector_database_address }}/{{ ironic_inspector_database_name }}
diff --git a/ansible/roles/ironic/templates/ironic-pxe.json.j2 b/ansible/roles/ironic/templates/ironic-pxe.json.j2
index 536d8fad77..7e07a14af5 100644
--- a/ansible/roles/ironic/templates/ironic-pxe.json.j2
+++ b/ansible/roles/ironic/templates/ironic-pxe.json.j2
@@ -1,7 +1,6 @@
 {
     "command": "/usr/sbin/in.tftpd --verbose --foreground --user root --address 0.0.0.0:69 --map-file /map-file /tftpboot",
     "config_files": [
-{% if orchestration_engine != 'KUBERNETES' %}
         {
             "source": "{{ container_config_directory }}/ironic-agent.kernel",
             "dest": "/tftpboot/ironic-agent.kernel",
@@ -14,7 +13,6 @@
             "owner": "root",
             "perm": "0644"
         },
-{% endif %}
         {
             "source": "{{ container_config_directory }}/default",
             "dest": "/tftpboot/pxelinux.cfg/default",
diff --git a/ansible/roles/ironic/templates/ironic.conf.j2 b/ansible/roles/ironic/templates/ironic.conf.j2
index db92ebd6cf..6164350a8b 100644
--- a/ansible/roles/ironic/templates/ironic.conf.j2
+++ b/ansible/roles/ironic/templates/ironic.conf.j2
@@ -8,25 +8,17 @@ admin_password = {{ keystone_admin_password }}
 
 enabled_drivers = pxe_ipmitool,pxe_ssh
 
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
+transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 {% if service_name == 'ironic-api' %}
 [api]
-
-{% if orchestration_engine != 'KUBERNETES' %}
 host_ip = {{ api_interface_address }}
-{% else %}
-host_ip = 0.0.0.0
-{% endif %}
 api_workers = {{ openstack_service_workers }}
 {% endif %}
 
 {% if service_name == 'ironic-conductor' %}
 [conductor]
-{% if orchestration_engine != 'KUBERNETES' %}
 api_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_api_port }}
-{% else %}
-api_url = {{ internal_protocol }}://ironic-api:{{ ironic_api_port }}
 {% endif %}
 automated_clean=false
 {% endif %}
@@ -36,13 +28,8 @@ connection = mysql+pymysql://{{ ironic_database_user }}:{{ ironic_database_passw
 max_retries = -1
 
 [keystone_authtoken]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_admin_url }}
-{% else %}
 auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
 auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
 auth_type = password
 project_domain_id = {{ default_project_domain_id }}
 user_domain_id = {{ default_user_domain_id }}
@@ -52,11 +39,7 @@ password = {{ ironic_keystone_password }}
 
 memcache_security_strategy = ENCRYPT
 memcache_secret_key = {{ memcache_secret_key }}
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
 memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{%- endif %}
 
 [deploy]
 default_boot_option = netboot
diff --git a/ansible/roles/ironic/templates/pxelinux.default.j2 b/ansible/roles/ironic/templates/pxelinux.default.j2
index 192c2a1f30..5304611a3d 100644
--- a/ansible/roles/ironic/templates/pxelinux.default.j2
+++ b/ansible/roles/ironic/templates/pxelinux.default.j2
@@ -2,10 +2,6 @@ default introspect
 
 label introspect
 kernel ironic-agent.kernel
-{% if orchestration_engine != 'KUBERNETES' %}
 append initrd=ironic-agent.initramfs ipa-inspection-callback-url=http://{{ kolla_internal_vip_address }}:{{ ironic_inspector_port }}/v1/continue systemd.journald.forward_to_console=yes
-{% else %}
-append initrd=ironic-agent.initramfs ipa-inspection-callback-url=http://ironic-inspector:{{ ironic_inspector_port }}/v1/continue systemd.journald.forward_to_console=yes
-{% endif %}
 
 ipappend 3
diff --git a/ansible/roles/karbor/templates/karbor.conf.j2 b/ansible/roles/karbor/templates/karbor.conf.j2
index fa54d41422..d1a0394e77 100644
--- a/ansible/roles/karbor/templates/karbor.conf.j2
+++ b/ansible/roles/karbor/templates/karbor.conf.j2
@@ -1,7 +1,7 @@
 [DEFAULT]
 debug = {{ karbor_logging_debug }}
 log_dir = /var/log/kolla/karbor
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
+transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 {% if service_name == 'karbor-api' %}
 osapi_karbor_listen = {{ api_interface_address }}
@@ -44,4 +44,4 @@ memcache_secret_key = {{ memcache_secret_key }}
 memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 [oslo_messaging_rabbit]
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
+transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
diff --git a/ansible/roles/keystone/templates/keystone.conf.j2 b/ansible/roles/keystone/templates/keystone.conf.j2
index 43aa002559..886e4d94f4 100644
--- a/ansible/roles/keystone/templates/keystone.conf.j2
+++ b/ansible/roles/keystone/templates/keystone.conf.j2
@@ -2,7 +2,7 @@
 debug = {{ keystone_logging_debug }}
 {% if enable_cadf_notifications | bool %}
 notification_format = cadf
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
+transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
 {% endif %}
 
 # NOTE(elemoine) log_dir alone does not work for Keystone
@@ -37,17 +37,7 @@ max_active_keys = {{ (groups['keystone'] | length) + 1 }}
 [cache]
 backend = oslo_cache.memcache_pool
 enabled = True
-
-{# For Kolla-Ansible, generate the memcache servers based on the list of
-memcached servers in the inventory and memcached_servers should be un-set.
-For Kolla-Kubernetes,  it is necessary to define the memcached_servers
-variable in globals.yml to set it to the Kubernetes service for memcached. #}
-
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
 memcache_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{%- endif %}
 
 {% if enable_cadf_notifications | bool %}
 [oslo_messaging_notifications]
diff --git a/ansible/roles/kibana/templates/kibana.yml.j2 b/ansible/roles/kibana/templates/kibana.yml.j2
index fbdcdcc98a..12920d1333 100644
--- a/ansible/roles/kibana/templates/kibana.yml.j2
+++ b/ansible/roles/kibana/templates/kibana.yml.j2
@@ -2,7 +2,7 @@ kibana.defaultAppId: "{{ kibana_default_app_id }}"
 logging.dest: /var/log/kolla/kibana/kibana.log
 server.port: {{ kibana_server_port }}
 server.host: "{{ api_interface_address }}"
-elasticsearch.url: "{{ internal_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}elasticsearch{% else %}{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}{% endif %}:{{ elasticsearch_port }}"
+elasticsearch.url: "{{ internal_protocol }}://{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}:{{ elasticsearch_port }}"
 elasticsearch.requestTimeout: {{ kibana_elasticsearch_request_timeout }}
 elasticsearch.shardTimeout: {{ kibana_elasticsearch_shard_timeout }}
 elasticsearch.ssl.verify: {{ kibana_elasticsearch_ssl_verify }}
diff --git a/ansible/roles/mariadb/templates/galera.cnf.j2 b/ansible/roles/mariadb/templates/galera.cnf.j2
index bee9d80ac7..1adcd483ad 100644
--- a/ansible/roles/mariadb/templates/galera.cnf.j2
+++ b/ansible/roles/mariadb/templates/galera.cnf.j2
@@ -1,9 +1,5 @@
 {%- set wsrep_driver = '/usr/lib/galera/libgalera_smm.so' if kolla_base_distro == 'ubuntu' else '/usr/lib64/galera/libgalera_smm.so' %}
 
-{#- Disable Galera in the case of of Kubernetes as its not supported yet.  Otherwise, #}
-{#- mariadb will fail to start #}
-{%- set wsrep_driver = 'none' if orchestration_engine == 'KUBERNETES' else wsrep_driver %}
-
 [client]
 default-character-set=utf8
 
diff --git a/ansible/roles/neutron/defaults/main.yml b/ansible/roles/neutron/defaults/main.yml
index 944a3eac79..d4aff655e0 100644
--- a/ansible/roles/neutron/defaults/main.yml
+++ b/ansible/roles/neutron/defaults/main.yml
@@ -7,7 +7,7 @@ neutron_services:
     image: "{{ neutron_server_image_full }}"
     enabled: true
     group: "neutron-server"
-    host_in_groups: "{{ True if orchestration_engine == 'KUBERNETES' else inventory_hostname in groups['neutron-server'] }}"
+    host_in_groups: "{{ inventory_hostname in groups['neutron-server'] }}"
     volumes:
       - "{{ node_config_directory }}/neutron-server/:{{ container_config_directory }}/:ro"
       - "/etc/localtime:/etc/localtime:ro"
@@ -19,7 +19,6 @@ neutron_services:
     privileged: True
     host_in_groups: >-
       {{
-      True if orchestration_engine == 'KUBERNETES' else
       ( inventory_hostname in groups['compute']
       or (enable_manila | bool and inventory_hostname in groups['manila-share'])
       or inventory_hostname in groups['neutron-dhcp-agent']
@@ -48,7 +47,6 @@ neutron_services:
     privileged: True
     host_in_groups: >-
       {{
-      True if orchestration_engine == 'KUBERNETES' else
       ( inventory_hostname in groups['compute']
       or (enable_manila | bool and inventory_hostname in groups['manila-share'])
       or inventory_hostname in groups['neutron-dhcp-agent']
@@ -78,7 +76,6 @@ neutron_services:
       NEUTRON_INTERFACE: "{{ neutron_external_interface }}"
     host_in_groups: >-
       {{
-      True if orchestration_engine == 'KUBERNETES' else
       inventory_hostname in groups['compute']
       or (enable_manila | bool and inventory_hostname in groups['manila-share'])
       or inventory_hostname in groups['neutron-dhcp-agent']
@@ -98,7 +95,7 @@ neutron_services:
     privileged: True
     enabled: "{{ neutron_plugin_agent != 'vmware_nsxv' }}"
     group: "neutron-dhcp-agent"
-    host_in_groups: "{{ True if orchestration_engine == 'KUBERNETES' else inventory_hostname in groups['neutron-dhcp-agent'] }}"
+    host_in_groups: "{{ inventory_hostname in groups['neutron-dhcp-agent'] }}"
     volumes:
       - "{{ node_config_directory }}/neutron-dhcp-agent/:{{ container_config_directory }}/:ro"
       - "/etc/localtime:/etc/localtime:ro"
@@ -112,7 +109,6 @@ neutron_services:
     enabled: "{{ not enable_neutron_vpnaas | bool and neutron_plugin_agent != 'vmware_nsxv' }}"
     host_in_groups: >-
       {{
-      True if orchestration_engine == 'KUBERNETES' else
       inventory_hostname in groups['neutron-l3-agent']
       or (inventory_hostname in groups['compute'] and enable_neutron_dvr | bool)
       }}
@@ -128,7 +124,7 @@ neutron_services:
     privileged: True
     enabled: "{{ enable_neutron_lbaas | bool and neutron_plugin_agent != 'vmware_nsxv' }}"
     group: "neutron-lbaas-agent"
-    host_in_groups: "{{ True if orchestration_engine == 'KUBERNETES' else inventory_hostname in groups['neutron-lbaas-agent'] }}"
+    host_in_groups: "{{ inventory_hostname in groups['neutron-lbaas-agent'] }}"
     volumes:
       - "{{ node_config_directory }}/neutron-lbaas-agent/:{{ container_config_directory }}/:ro"
       - "/etc/localtime:/etc/localtime:ro"
@@ -141,7 +137,6 @@ neutron_services:
     enabled: "{{ neutron_plugin_agent != 'vmware_nsxv' }}"
     host_in_groups: >-
       {{
-      True if orchestration_engine == 'KUBERNETES' else
       inventory_hostname in groups['neutron-metadata-agent']
       or (inventory_hostname in groups['compute'] and enable_neutron_dvr | bool)
       }}
@@ -157,7 +152,7 @@ neutron_services:
     privileged: True
     enabled: "{{ enable_neutron_vpnaas | bool and neutron_plugin_agent != 'vmware_nsxv' }}"
     group: "neutron-vpnaas-agent"
-    host_in_groups: "{{ True if orchestration_engine == 'KUBERNETES' else inventory_hostname in groups['neutron-vpnaas-agent'] }}"
+    host_in_groups: "{{ inventory_hostname in groups['neutron-vpnaas-agent'] }}"
     volumes:
       - "{{ node_config_directory }}/neutron-vpnaas-agent/:{{ container_config_directory }}/:ro"
       - "/etc/localtime:/etc/localtime:ro"
@@ -170,7 +165,7 @@ neutron_services:
     privileged: True
     enabled: "{{ enable_neutron_bgp_dragent | bool and neutron_plugin_agent != 'vmware_nsxv' }}"
     group: "neutron-bgp-dragent"
-    host_in_groups: "{{ False if orchestration_engine == 'KUBERNETES' else inventory_hostname in groups['neutron-bgp-dragent'] }}"
+    host_in_groups: "{{ inventory_hostname in groups['neutron-bgp-dragent'] }}"
     volumes:
       - "{{ node_config_directory }}/neutron-bgp-dragent/:{{ container_config_directory }}/:ro"
       - "/etc/localtime:/etc/localtime:ro"
diff --git a/ansible/roles/neutron/templates/metadata_agent.ini.j2 b/ansible/roles/neutron/templates/metadata_agent.ini.j2
index b9b1548eb5..944ba1bcfb 100644
--- a/ansible/roles/neutron/templates/metadata_agent.ini.j2
+++ b/ansible/roles/neutron/templates/metadata_agent.ini.j2
@@ -1,6 +1,6 @@
 # metadata_agent.ini
 [DEFAULT]
-nova_metadata_host = {% if orchestration_engine == 'KUBERNETES' %}nova-metadata{% else %}{{ kolla_internal_fqdn }}{% endif %}
+nova_metadata_host = {{ kolla_internal_fqdn }}
 
 nova_metadata_port = {{ nova_metadata_port }}
 metadata_proxy_shared_secret = {{ metadata_secret }}
diff --git a/ansible/roles/neutron/templates/neutron.conf.j2 b/ansible/roles/neutron/templates/neutron.conf.j2
index 97dc71f5fd..cb474dfc69 100644
--- a/ansible/roles/neutron/templates/neutron.conf.j2
+++ b/ansible/roles/neutron/templates/neutron.conf.j2
@@ -50,7 +50,7 @@ l3_ha = true
 max_l3_agents_per_router = {{ max_l3_agents_per_router }}
 {% endif %}
 
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
+transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 {% if enable_neutron_dvr | bool %}
 router_distributed = True
@@ -95,11 +95,7 @@ password = {{ neutron_keystone_password }}
 memcache_security_strategy = ENCRYPT
 memcache_secret_key = {{ memcache_secret_key }}
 
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
-memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{% endif %}
+memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 [oslo_messaging_notifications]
 {% if neutron_enabled_notification_topics %}
diff --git a/ansible/roles/nova/defaults/main.yml b/ansible/roles/nova/defaults/main.yml
index 6339e17ba6..cb7e9c7446 100644
--- a/ansible/roles/nova/defaults/main.yml
+++ b/ansible/roles/nova/defaults/main.yml
@@ -111,7 +111,7 @@ nova_services:
     group: "compute"
     image: "{{ nova_compute_image_full }}"
     privileged: True
-    enabled: "{{ True if orchestration_engine == 'KUBERNETES' else not enable_nova_fake | bool }}"
+    enabled: "{{ not enable_nova_fake | bool }}"
     volumes:
       - "{{ node_config_directory }}/nova-compute/:{{ container_config_directory }}/:ro"
       - "/etc/localtime:/etc/localtime:ro"
diff --git a/ansible/roles/nova/templates/nova.conf.j2 b/ansible/roles/nova/templates/nova.conf.j2
index d6223a7923..4ead2182ab 100644
--- a/ansible/roles/nova/templates/nova.conf.j2
+++ b/ansible/roles/nova/templates/nova.conf.j2
@@ -31,11 +31,7 @@ reserved_host_memory_mb = 0
 {% endif %}
 
 {% if enable_nova_fake | bool %}
-{% if orchestration_engine == 'KUBERNETES' %}
-host = empty
-{% else %}
 host = {{ ansible_hostname }}_{{ service_name }}
-{% endif %}
 compute_driver = fake.FakeDriver
 {% else %}
 compute_driver = libvirt.LibvirtDriver
@@ -52,7 +48,7 @@ compute_monitors=nova.compute.monitors.cpu.virt_driver
 {% endif %}
 {% endif %}
 
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
+transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 [api]
 use_forwarded_for = true
@@ -70,7 +66,7 @@ novncproxy_port = {{ nova_novncproxy_port }}
 vncserver_listen = {{ api_interface_address }}
 vncserver_proxyclient_address = {{ api_interface_address }}
 {% if inventory_hostname in groups['compute'] %}
-novncproxy_base_url = {{ public_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}kolla_kubernetes_external_vip{% else %}{{ kolla_external_fqdn }}{% endif %}:{{ nova_novncproxy_port }}/vnc_auto.html
+novncproxy_base_url = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ nova_novncproxy_port }}/vnc_auto.html
 {% endif %}
 {% endif %}
 {% elif nova_console == 'spice' %}
@@ -82,7 +78,7 @@ enabled = true
 server_listen = {{ api_interface_address }}
 server_proxyclient_address = {{ api_interface_address }}
 {% if inventory_hostname in groups['compute'] %}
-html5proxy_base_url = {{ public_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}kolla_kubernetes_external_vip{% else %}{{ kolla_external_fqdn }}{% endif %}:{{ nova_spicehtml5proxy_port }}/spice_auto.html
+html5proxy_base_url = {{ public_protocol }}://{{ kolla_external_fqdn }}:{{ nova_spicehtml5proxy_port }}/spice_auto.html
 {% endif %}
 html5proxy_host = {{ api_interface_address }}
 html5proxy_port = {{ nova_spicehtml5proxy_port }}
@@ -100,27 +96,19 @@ proxyclient_address = {{ api_interface_address }}
 [ironic]
 username = {{ ironic_keystone_user }}
 password = {{ ironic_keystone_password }}
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_url = {{ keystone_admin_url }}
-{% else %}
 auth_url = {{ openstack_auth.auth_url }}/v3
-{% endif %}
 auth_type = password
 project_name = service
 user_domain_name = {{ default_user_domain_name }}
 project_domain_name = {{ default_project_domain_name }}
-{% if orchestration_engine != 'KUBERNETES' %}
 api_endpoint = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ ironic_api_port }}/v1
-{% else %}
-api_endpoint = {{ internal_protocol }}://ironic-api:{{ ironic_api_port }}/v1
-{% endif %}
 {% endif %}
 
 [oslo_concurrency]
 lock_path = /var/lib/nova/tmp
 
 [glance]
-api_servers = {{ internal_protocol }}://{% if orchestration_engine == 'KUBERNETES' %}glance-api{% else %}{{ kolla_internal_vip_address }}{% endif %}:{{ glance_api_port }}
+api_servers = {{ internal_protocol }}://{{ kolla_internal_vip_address }}:{{ glance_api_port }}
 
 num_retries = {{ groups['glance-api'] | length }}
 
@@ -131,11 +119,7 @@ os_region_name = {{ openstack_region_name }}
 {% endif %}
 
 [neutron]
-{% if orchestration_engine == 'KUBERNETES' %}
-url = {{ internal_protocol }}://neutron-server:{{ neutron_server_port }}
-{% else %}
 url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ neutron_server_port }}
-{% endif %}
 metadata_proxy_shared_secret = {{ metadata_secret }}
 service_metadata_proxy = true
 
@@ -160,17 +144,12 @@ max_retries = -1
 [cache]
 backend = oslo_cache.memcache_pool
 enabled = True
-memcache_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
+memcache_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 
 [keystone_authtoken]
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_admin_url }}
-{% else %}
 auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
 auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
 auth_type = password
 project_domain_id = {{ default_project_domain_id }}
 user_domain_id = {{ default_user_domain_id }}
@@ -180,7 +159,7 @@ password = {{ nova_keystone_password }}
 
 memcache_security_strategy = ENCRYPT
 memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
+memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 
 [libvirt]
@@ -240,11 +219,7 @@ default_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabi
 
 [placement]
 auth_type = password
-{% if orchestration_engine == 'KUBERNETES' %}
-auth_url = {{ keystone_admin_url }}
-{% else %}
 auth_url = {{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}
-{% endif %}
 username = {{ placement_keystone_user }}
 password = {{ placement_keystone_password }}
 user_domain_name = {{ default_user_domain_name }}
diff --git a/ansible/roles/openvswitch/defaults/main.yml b/ansible/roles/openvswitch/defaults/main.yml
index 730975df9b..12eeaacae9 100644
--- a/ansible/roles/openvswitch/defaults/main.yml
+++ b/ansible/roles/openvswitch/defaults/main.yml
@@ -9,7 +9,6 @@ openvswitch_services:
     group: openvswitch
     host_in_groups: >-
       {{
-      True if orchestration_engine == 'KUBERNETES' else
       inventory_hostname in groups['compute']
       or (enable_manila | bool and inventory_hostname in groups['manila-share'])
       or inventory_hostname in groups['neutron-dhcp-agent']
@@ -30,7 +29,6 @@ openvswitch_services:
     group: openvswitch
     host_in_groups: >-
       {{
-      True if orchestration_engine == 'KUBERNETES' else
       inventory_hostname in groups['compute']
       or (enable_manila | bool and inventory_hostname in groups['manila-share'])
       or inventory_hostname in groups['neutron-dhcp-agent']
diff --git a/ansible/roles/openvswitch/templates/openvswitch-db-server.json.j2 b/ansible/roles/openvswitch/templates/openvswitch-db-server.json.j2
index 72b9ad694a..fdeea72ecb 100644
--- a/ansible/roles/openvswitch/templates/openvswitch-db-server.json.j2
+++ b/ansible/roles/openvswitch/templates/openvswitch-db-server.json.j2
@@ -1,4 +1,4 @@
 {
-    "command": "start-ovsdb-server {{ api_interface_address }} {% if orchestration_engine == 'KUBERNETES' %} {{ neutron_bridge_name }} {{ neutron_external_interface }} {% endif %}",
+    "command": "start-ovsdb-server {{ api_interface_address }}",
     "config_files": []
 }
diff --git a/ansible/roles/rabbitmq/templates/rabbitmq-env.conf.j2 b/ansible/roles/rabbitmq/templates/rabbitmq-env.conf.j2
index 551c43fb4c..addadca9c5 100644
--- a/ansible/roles/rabbitmq/templates/rabbitmq-env.conf.j2
+++ b/ansible/roles/rabbitmq/templates/rabbitmq-env.conf.j2
@@ -1,8 +1,6 @@
 RABBITMQ_NODENAME=rabbit
-{% if orchestration_engine == 'ANSIBLE' %}
 RABBITMQ_BOOT_MODULE=rabbit_clusterer
 RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS="-pa /usr/lib/rabbitmq/lib/{{ rabbitmq_version }}"
-{%- endif %}
 
 RABBITMQ_LOG_BASE=/var/log/kolla/{{ project_name }}
 
diff --git a/ansible/roles/rabbitmq/templates/rabbitmq.config.j2 b/ansible/roles/rabbitmq/templates/rabbitmq.config.j2
index 037474aeac..11f846ebf6 100644
--- a/ansible/roles/rabbitmq/templates/rabbitmq.config.j2
+++ b/ansible/roles/rabbitmq/templates/rabbitmq.config.j2
@@ -7,9 +7,8 @@
   {rabbit, [
     {tcp_listeners, [
       {"{{ api_interface_address }}", {{ role_rabbitmq_port }}}
-    ]}{% if orchestration_engine == 'ANSIBLE' %},
+    ]},
     {cluster_partition_handling, autoheal}
-    {%- endif %}
   ]},
   {rabbitmq_management, [
     {listener, [
@@ -17,8 +16,7 @@
       {port, {{ role_rabbitmq_management_port }}}
     ]},
     {load_definitions, "/etc/rabbitmq/definitions.json"}
-  ]}{% if orchestration_engine == 'ANSIBLE' %},
+  ]},
   {rabbitmq_clusterer, [{config, "/etc/rabbitmq/rabbitmq-clusterer.config"}]}
-{%- endif %}
 ].
 % EOF
diff --git a/ansible/roles/sahara/templates/sahara.conf.j2 b/ansible/roles/sahara/templates/sahara.conf.j2
index 62ade771f0..fe7affd541 100644
--- a/ansible/roles/sahara/templates/sahara.conf.j2
+++ b/ansible/roles/sahara/templates/sahara.conf.j2
@@ -29,11 +29,7 @@ admin_tenant_name = service
 
 memcache_security_strategy = ENCRYPT
 memcache_secret_key = {{ memcache_secret_key }}
-{% if orchestration_engine == 'KUBERNETES' %}
-memcache_servers = {{ memcached_servers }}
-{% else %}
 memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{% endif %}
 
 [service_credentials]
 auth_url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
diff --git a/ansible/roles/searchlight/templates/searchlight.conf.j2 b/ansible/roles/searchlight/templates/searchlight.conf.j2
index 17ab74d3b7..0cf17833d6 100644
--- a/ansible/roles/searchlight/templates/searchlight.conf.j2
+++ b/ansible/roles/searchlight/templates/searchlight.conf.j2
@@ -3,7 +3,7 @@ debug = {{ searchlight_logging_debug }}
 
 policy_file = /etc/searchlight/policy.json
 log_dir = /var/log/kolla/searchlight
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
+transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 
 [api]
@@ -34,7 +34,7 @@ auth_type = password
 
 memcache_security_strategy = ENCRYPT
 memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
+memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 [service_credentials]
 auth_uri = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}
@@ -50,7 +50,7 @@ auth_plugin = password
 
 memcache_security_strategy = ENCRYPT
 memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{% if orchestration_engine == 'KUBERNETES' %}memcached{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
+memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 [resource_plugin:os_cinder_volume]
 enabled = {{ enable_cinder | bool }}
diff --git a/ansible/roles/solum/templates/solum.conf.j2 b/ansible/roles/solum/templates/solum.conf.j2
index 7ecd449265..42f929a718 100644
--- a/ansible/roles/solum/templates/solum.conf.j2
+++ b/ansible/roles/solum/templates/solum.conf.j2
@@ -56,4 +56,4 @@ memcache_secret_key = {{ memcache_secret_key }}
 memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 [oslo_messaging_rabbit]
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
+transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
diff --git a/ansible/roles/zun/templates/zun.conf.j2 b/ansible/roles/zun/templates/zun.conf.j2
index df1e9c38ba..128d47cc44 100644
--- a/ansible/roles/zun/templates/zun.conf.j2
+++ b/ansible/roles/zun/templates/zun.conf.j2
@@ -1,7 +1,7 @@
 [DEFAULT]
 debug = {{ zun_logging_debug }}
 log_dir = /var/log/kolla/zun
-transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{% if orchestration_engine == 'KUBERNETES' %}rabbitmq{% else %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}{% endif %}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
+transport_url = rabbit://{% for host in groups['rabbitmq'] %}{{ rabbitmq_user }}:{{ rabbitmq_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ rabbitmq_port }}{% if not loop.last %},{% endif %}{% endfor %}
 
 container_driver = docker.driver.DockerDriver
 image_driver_list = glance