# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for neutron.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value

release_group: null

images:
  tags:
    bootstrap: docker.io/openstackhelm/heat:ocata
    test: docker.io/xrally/xrally-openstack:1.3.0
    db_init: docker.io/openstackhelm/heat:ocata
    neutron_db_sync: docker.io/openstackhelm/neutron:ocata
    db_drop: docker.io/openstackhelm/heat:ocata
    rabbit_init: docker.io/rabbitmq:3.7-management
    ks_user: docker.io/openstackhelm/heat:ocata
    ks_service: docker.io/openstackhelm/heat:ocata
    ks_endpoints: docker.io/openstackhelm/heat:ocata
    neutron_server: docker.io/openstackhelm/neutron:ocata
    neutron_dhcp: docker.io/openstackhelm/neutron:ocata
    neutron_metadata: docker.io/openstackhelm/neutron:ocata
    neutron_l3: docker.io/openstackhelm/neutron:ocata
    neutron_openvswitch_agent: docker.io/openstackhelm/neutron:ocata
    neutron_linuxbridge_agent: docker.io/openstackhelm/neutron:ocata
    neutron_sriov_agent: docker.io/openstackhelm/neutron:ocata-sriov-1804
    neutron_sriov_agent_init: docker.io/openstackhelm/neutron:ocata-sriov-1804
    dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
    image_repo_sync: docker.io/docker:17.07.0
  pull_policy: "IfNotPresent"
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync

labels:
  agent:
    dhcp:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    l3:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
    metadata:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  lb:
    node_selector_key: linuxbridge
    node_selector_value: enabled
  # openvswitch is a special case, requiring a special
  # label that can apply to both control hosts
  # and compute hosts, until we get more sophisticated
  # with our daemonset scheduling
  ovs:
    node_selector_key: openvswitch
    node_selector_value: enabled
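  # As a minimal, illustrative sketch (the node name is a placeholder), the
  # labels referenced in this section can be applied with kubectl, e.g.:
  #   kubectl label node <node-name> openvswitch=enabled
  #   kubectl label node <node-name> openstack-control-plane=enabled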
  sriov:
    node_selector_key: sriov
    node_selector_value: enabled
  server:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled

network:
  # Provide the type of network wiring that will be used.
  # Possible options: openvswitch, linuxbridge, sriov
  backend:
    - openvswitch
  # NOTE(Portdirect): Share network namespaces with the host,
  # allowing agents to be restarted without packet loss and making
  # debugging simpler. This feature requires mount propagation support.
  share_namespaces: true
  interface:
  # Tunnel interface will be used for VXLAN tunneling.
    tunnel: null
  # If tunnel is null, a fallback mechanism searches for the
  # interface whose route matches the tunnel network CIDR.
    tunnel_network_cidr: "0/0"
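  # As an illustrative override (the interface name is a placeholder), the
  # tunnel interface can be pinned explicitly instead of relying on the
  # CIDR-based fallback:
  # interface:
  #   tunnel: ens3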
  sriov:
  # To set up network interfaces using the SR-IOV init
  # container, use a section similar to:
  # sriov:
  #   - device: ${DEV}
  #     num_vfs: 8
  #     mtu: 9214
  #     promisc: false
  server:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30096

bootstrap:
  enabled: false
  ks_user: neutron
  script: |
    openstack token issue
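  # As an illustrative override (network name, type and physical network are
  # placeholders; adjust to the deployment), the bootstrap script could also
  # create a shared external network once the API is up:
  # script: |
  #   openstack network create --share --external \
  #     --provider-network-type flat \
  #     --provider-physical-network external public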

dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - neutron-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
    targeted:
      sriov: {}
      openvswitch:
        dhcp:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
        l3:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
        metadata:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
      linuxbridge:
        dhcp:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        l3:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        metadata:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
        lb_agent:
          pod: null
  static:
    bootstrap:
      services:
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    db_drop:
      services:
        - endpoint: internal
          service: oslo_db
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - neutron-db-init
      services:
        - endpoint: internal
          service: oslo_db
    dhcp:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    ks_endpoints:
      jobs:
        - neutron-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rabbit_init:
      services:
        - service: oslo_messaging
          endpoint: internal
    l3:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    lb_agent:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
    metadata:
      pod: null
      jobs:
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
        - endpoint: public
          service: compute_metadata
    ovs_agent:
      jobs:
        - neutron-rabbit-init
      pod:
        - requireSameNode: true
          labels:
            application: openvswitch
            component: openvswitch-vswitchd
        - requireSameNode: true
          labels:
            application: openvswitch
            component: openvswitch-vswitchd-db
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: network
    server:
      jobs:
        - neutron-db-sync
        - neutron-ks-user
        - neutron-ks-endpoints
        - neutron-rabbit-init
      services:
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_cache
        - endpoint: internal
          service: identity
    tests:
      services:
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry

pod:
  user:
    neutron:
      uid: 42424
  affinity:
    anti:
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      topologyKey:
        default: kubernetes.io/hostname
  mounts:
    neutron_server:
      init_container: null
      neutron_server:
        volumeMounts:
        volumes:
    neutron_dhcp_agent:
      init_container: null
      neutron_dhcp_agent:
        volumeMounts:
        volumes:
    neutron_l3_agent:
      init_container: null
      neutron_l3_agent:
        volumeMounts:
        volumes:
    neutron_lb_agent:
      init_container: null
      neutron_lb_agent:
        volumeMounts:
        volumes:
    neutron_metadata_agent:
      init_container: null
      neutron_metadata_agent:
        volumeMounts:
        volumes:
    neutron_ovs_agent:
      init_container: null
      neutron_ovs_agent:
        volumeMounts:
        volumes:
    neutron_sriov_agent:
      init_container: null
      neutron_sriov_agent:
        volumeMounts:
        volumes:
    neutron_tests:
      init_container: null
      neutron_tests:
        volumeMounts:
        volumes:
    neutron_bootstrap:
      init_container: null
      neutron_bootstrap:
        volumeMounts:
        volumes:
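    # As an illustrative sketch (the configMap name "neutron-extra" is a
    # placeholder), extra volumes can be attached to a pod through the
    # corresponding entry above, e.g. for the server:
    # neutron_server:
    #   neutron_server:
    #     volumeMounts:
    #       - name: neutron-extra
    #         mountPath: /etc/neutron/extra
    #     volumes:
    #       - name: neutron-extra
    #         configMap:
    #           name: neutron-extra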
  replicas:
    server: 1
  lifecycle:
    upgrades:
      deployments:
        revision_history: 3
        pod_replacement_strategy: RollingUpdate
        rolling_update:
          max_unavailable: 1
          max_surge: 3
      daemonsets:
        pod_replacement_strategy: RollingUpdate
        dhcp_agent:
          enabled: false
          min_ready_seconds: 0
          max_unavailable: 1
        l3_agent:
          enabled: false
          min_ready_seconds: 0
          max_unavailable: 1
        lb_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        metadata_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        ovs_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
        sriov_agent:
          enabled: true
          min_ready_seconds: 0
          max_unavailable: 1
    disruption_budget:
      server:
        min_available: 0
    termination_grace_period:
      server:
        timeout: 30
  resources:
    enabled: false
    agent:
      dhcp:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      l3:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      lb:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      metadata:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ovs:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      sriov:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
    server:
      requests:
        memory: "128Mi"
        cpu: "100m"
      limits:
        memory: "1024Mi"
        cpu: "2000m"
    jobs:
      bootstrap:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      rabbit_init:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      db_drop:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_endpoints:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_service:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      ks_user:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      tests:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"
      image_repo_sync:
        requests:
          memory: "128Mi"
          cpu: "100m"
        limits:
          memory: "1024Mi"
          cpu: "2000m"

conf:
  rally_tests:
    run_tempest: false
    tests:
      NeutronNetworks.create_and_delete_networks:
        - args:
            network_create_args: {}
          context:
            quotas:
              neutron:
                network: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_delete_ports:
        - args:
            network_create_args: {}
            port_create_args: {}
            ports_per_network: 10
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                port: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_delete_routers:
        - args:
            network_create_args: {}
            router_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                router: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_delete_subnets:
        - args:
            network_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_list_routers:
        - args:
            network_create_args: {}
            router_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                router: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_list_subnets:
        - args:
            network_create_args: {}
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_show_network:
        - args:
            network_create_args: {}
          context:
            quotas:
              neutron:
                network: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_networks:
        - args:
            network_create_args: {}
            network_update_args:
              admin_state_up: false
              name: _updated
          context:
            quotas:
              neutron:
                network: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_ports:
        - args:
            network_create_args: {}
            port_create_args: {}
            port_update_args:
              admin_state_up: false
              device_id: dummy_id
              device_owner: dummy_owner
              name: _port_updated
            ports_per_network: 5
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                port: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_routers:
        - args:
            network_create_args: {}
            router_create_args: {}
            router_update_args:
              admin_state_up: false
              name: _router_updated
            subnet_cidr_start: 1.1.0.0/30
            subnet_create_args: {}
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                router: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.create_and_update_subnets:
        - args:
            network_create_args: {}
            subnet_cidr_start: 1.4.0.0/16
            subnet_create_args: {}
            subnet_update_args:
              enable_dhcp: false
              name: _subnet_updated
            subnets_per_network: 2
          context:
            network: {}
            quotas:
              neutron:
                network: -1
                subnet: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronNetworks.list_agents:
        - args:
            agent_args: {}
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronSecurityGroup.create_and_list_security_groups:
        - args:
            security_group_create_args: {}
          context:
            quotas:
              neutron:
                security_group: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NeutronSecurityGroup.create_and_update_security_groups:
        - args:
            security_group_create_args: {}
            security_group_update_args: {}
          context:
            quotas:
              neutron:
                security_group: -1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
  paste:
    composite:neutron:
      use: egg:Paste#urlmap
      /: neutronversions_composite
      /v2.0: neutronapi_v2_0
    composite:neutronapi_v2_0:
      use: call:neutron.auth:pipeline_factory
      noauth: cors http_proxy_to_wsgi request_id catch_errors extensions neutronapiapp_v2_0
      keystone: cors http_proxy_to_wsgi request_id catch_errors authtoken audit keystonecontext extensions neutronapiapp_v2_0
    composite:neutronversions_composite:
      use: call:neutron.auth:pipeline_factory
      noauth: cors http_proxy_to_wsgi neutronversions
      keystone: cors http_proxy_to_wsgi neutronversions
    filter:request_id:
      paste.filter_factory: oslo_middleware:RequestId.factory
    filter:catch_errors:
      paste.filter_factory: oslo_middleware:CatchErrors.factory
    filter:cors:
      paste.filter_factory: oslo_middleware.cors:filter_factory
      oslo_config_project: neutron
    filter:http_proxy_to_wsgi:
      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
    filter:keystonecontext:
      paste.filter_factory: neutron.auth:NeutronKeystoneContext.factory
    filter:authtoken:
      paste.filter_factory: keystonemiddleware.auth_token:filter_factory
    filter:audit:
      paste.filter_factory: keystonemiddleware.audit:filter_factory
      audit_map_file: /etc/neutron/api_audit_map.conf
    filter:extensions:
      paste.filter_factory: neutron.api.extensions:plugin_aware_extension_middleware_factory
    app:neutronversions:
      paste.app_factory: neutron.api.versions:Versions.factory
    app:neutronapiapp_v2_0:
      paste.app_factory: neutron.api.v2.router:APIRouter.factory
    filter:osprofiler:
      paste.filter_factory: osprofiler.web:WsgiMiddleware.factory
  policy:
    context_is_admin: role:admin
    owner: tenant_id:%(tenant_id)s
    admin_or_owner: rule:context_is_admin or rule:owner
    context_is_advsvc: role:advsvc
    admin_or_network_owner: rule:context_is_admin or tenant_id:%(network:tenant_id)s
    admin_owner_or_network_owner: rule:owner or rule:admin_or_network_owner
    admin_only: rule:context_is_admin
    regular_user: ''
    shared: field:networks:shared=True
    shared_subnetpools: field:subnetpools:shared=True
    shared_address_scopes: field:address_scopes:shared=True
    external: field:networks:router:external=True
    default: rule:admin_or_owner
    create_subnet: rule:admin_or_network_owner
    create_subnet:segment_id: rule:admin_only
    create_subnet:service_types: rule:admin_only
    get_subnet: rule:admin_or_owner or rule:shared
    get_subnet:segment_id: rule:admin_only
    update_subnet: rule:admin_or_network_owner
    update_subnet:service_types: rule:admin_only
    delete_subnet: rule:admin_or_network_owner
    create_subnetpool: ''
    create_subnetpool:shared: rule:admin_only
    create_subnetpool:is_default: rule:admin_only
    get_subnetpool: rule:admin_or_owner or rule:shared_subnetpools
    update_subnetpool: rule:admin_or_owner
    update_subnetpool:is_default: rule:admin_only
    delete_subnetpool: rule:admin_or_owner
    create_address_scope: ''
    create_address_scope:shared: rule:admin_only
    get_address_scope: rule:admin_or_owner or rule:shared_address_scopes
    update_address_scope: rule:admin_or_owner
    update_address_scope:shared: rule:admin_only
    delete_address_scope: rule:admin_or_owner
    create_network: ''
    get_network: rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc
    get_network:router:external: rule:regular_user
    get_network:segments: rule:admin_only
    get_network:provider:network_type: rule:admin_only
    get_network:provider:physical_network: rule:admin_only
    get_network:provider:segmentation_id: rule:admin_only
    get_network:queue_id: rule:admin_only
    get_network_ip_availabilities: rule:admin_only
    get_network_ip_availability: rule:admin_only
    create_network:shared: rule:admin_only
    create_network:router:external: rule:admin_only
    create_network:is_default: rule:admin_only
    create_network:segments: rule:admin_only
    create_network:provider:network_type: rule:admin_only
    create_network:provider:physical_network: rule:admin_only
    create_network:provider:segmentation_id: rule:admin_only
    update_network: rule:admin_or_owner
    update_network:segments: rule:admin_only
    update_network:shared: rule:admin_only
    update_network:provider:network_type: rule:admin_only
    update_network:provider:physical_network: rule:admin_only
    update_network:provider:segmentation_id: rule:admin_only
    update_network:router:external: rule:admin_only
    delete_network: rule:admin_or_owner
    create_segment: rule:admin_only
    get_segment: rule:admin_only
    update_segment: rule:admin_only
    delete_segment: rule:admin_only
    network_device: 'field:port:device_owner=~^network:'
    create_port: ''
    create_port:device_owner: not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner
    create_port:mac_address: rule:context_is_advsvc or rule:admin_or_network_owner
    create_port:fixed_ips: rule:context_is_advsvc or rule:admin_or_network_owner
    create_port:port_security_enabled: rule:context_is_advsvc or rule:admin_or_network_owner
    create_port:binding:host_id: rule:admin_only
    create_port:binding:profile: rule:admin_only
    create_port:mac_learning_enabled: rule:context_is_advsvc or rule:admin_or_network_owner
    create_port:allowed_address_pairs: rule:admin_or_network_owner
    get_port: rule:context_is_advsvc or rule:admin_owner_or_network_owner
    get_port:queue_id: rule:admin_only
    get_port:binding:vif_type: rule:admin_only
    get_port:binding:vif_details: rule:admin_only
    get_port:binding:host_id: rule:admin_only
    get_port:binding:profile: rule:admin_only
    update_port: rule:admin_or_owner or rule:context_is_advsvc
    update_port:device_owner: not rule:network_device or rule:context_is_advsvc or rule:admin_or_network_owner
    update_port:mac_address: rule:admin_only or rule:context_is_advsvc
    update_port:fixed_ips: rule:context_is_advsvc or rule:admin_or_network_owner
    update_port:port_security_enabled: rule:context_is_advsvc or rule:admin_or_network_owner
    update_port:binding:host_id: rule:admin_only
    update_port:binding:profile: rule:admin_only
    update_port:mac_learning_enabled: rule:context_is_advsvc or rule:admin_or_network_owner
    update_port:allowed_address_pairs: rule:admin_or_network_owner
    delete_port: rule:context_is_advsvc or rule:admin_owner_or_network_owner
    get_router:ha: rule:admin_only
    create_router: rule:regular_user
    create_router:external_gateway_info:enable_snat: rule:admin_only
    create_router:distributed: rule:admin_only
    create_router:ha: rule:admin_only
    get_router: rule:admin_or_owner
    get_router:distributed: rule:admin_only
    update_router:external_gateway_info:enable_snat: rule:admin_only
    update_router:distributed: rule:admin_only
    update_router:ha: rule:admin_only
    delete_router: rule:admin_or_owner
    add_router_interface: rule:admin_or_owner
    remove_router_interface: rule:admin_or_owner
    create_router:external_gateway_info:external_fixed_ips: rule:admin_only
    update_router:external_gateway_info:external_fixed_ips: rule:admin_only
    insert_rule: rule:admin_or_owner
    remove_rule: rule:admin_or_owner
    create_qos_queue: rule:admin_only
    get_qos_queue: rule:admin_only
    update_agent: rule:admin_only
    delete_agent: rule:admin_only
    get_agent: rule:admin_only
    create_dhcp-network: rule:admin_only
    delete_dhcp-network: rule:admin_only
    get_dhcp-networks: rule:admin_only
    create_l3-router: rule:admin_only
    delete_l3-router: rule:admin_only
    get_l3-routers: rule:admin_only
    get_dhcp-agents: rule:admin_only
    get_l3-agents: rule:admin_only
    get_loadbalancer-agent: rule:admin_only
    get_loadbalancer-pools: rule:admin_only
    get_agent-loadbalancers: rule:admin_only
    get_loadbalancer-hosting-agent: rule:admin_only
    create_floatingip: rule:regular_user
    create_floatingip:floating_ip_address: rule:admin_only
    update_floatingip: rule:admin_or_owner
    delete_floatingip: rule:admin_or_owner
    get_floatingip: rule:admin_or_owner
    create_network_profile: rule:admin_only
    update_network_profile: rule:admin_only
    delete_network_profile: rule:admin_only
    get_network_profiles: ''
    get_network_profile: ''
    update_policy_profiles: rule:admin_only
    get_policy_profiles: ''
    get_policy_profile: ''
    create_metering_label: rule:admin_only
    delete_metering_label: rule:admin_only
    get_metering_label: rule:admin_only
    create_metering_label_rule: rule:admin_only
    delete_metering_label_rule: rule:admin_only
    get_metering_label_rule: rule:admin_only
    get_service_provider: rule:regular_user
    get_lsn: rule:admin_only
    create_lsn: rule:admin_only
    create_flavor: rule:admin_only
    update_flavor: rule:admin_only
    delete_flavor: rule:admin_only
    get_flavors: rule:regular_user
    get_flavor: rule:regular_user
    create_service_profile: rule:admin_only
    update_service_profile: rule:admin_only
    delete_service_profile: rule:admin_only
    get_service_profiles: rule:admin_only
    get_service_profile: rule:admin_only
    get_policy: rule:regular_user
    create_policy: rule:admin_only
    update_policy: rule:admin_only
    delete_policy: rule:admin_only
    get_policy_bandwidth_limit_rule: rule:regular_user
    create_policy_bandwidth_limit_rule: rule:admin_only
    delete_policy_bandwidth_limit_rule: rule:admin_only
    update_policy_bandwidth_limit_rule: rule:admin_only
    get_policy_dscp_marking_rule: rule:regular_user
    create_policy_dscp_marking_rule: rule:admin_only
    delete_policy_dscp_marking_rule: rule:admin_only
    update_policy_dscp_marking_rule: rule:admin_only
    get_rule_type: rule:regular_user
    get_policy_minimum_bandwidth_rule: rule:regular_user
    create_policy_minimum_bandwidth_rule: rule:admin_only
    delete_policy_minimum_bandwidth_rule: rule:admin_only
    update_policy_minimum_bandwidth_rule: rule:admin_only
    restrict_wildcard: "(not field:rbac_policy:target_tenant=*) or rule:admin_only"
    create_rbac_policy: ''
    create_rbac_policy:target_tenant: rule:restrict_wildcard
    update_rbac_policy: rule:admin_or_owner
    update_rbac_policy:target_tenant: rule:restrict_wildcard and rule:admin_or_owner
    get_rbac_policy: rule:admin_or_owner
    delete_rbac_policy: rule:admin_or_owner
    create_flavor_service_profile: rule:admin_only
    delete_flavor_service_profile: rule:admin_only
    get_flavor_service_profile: rule:regular_user
    get_auto_allocated_topology: rule:admin_or_owner
    create_trunk: rule:regular_user
    get_trunk: rule:admin_or_owner
    delete_trunk: rule:admin_or_owner
    get_subports: ''
    add_subports: rule:admin_or_owner
    remove_subports: rule:admin_or_owner
  api_audit_map:
    DEFAULT:
      target_endpoint_type: None
    custom_actions:
      add_router_interface: update/add
      remove_router_interface: update/remove
    path_keywords:
      floatingips: ip
      healthmonitors: healthmonitor
      health_monitors: health_monitor
      lb: None
      members: member
      metering-labels: label
      metering-label-rules: rule
      networks: network
      pools: pool
      ports: port
      routers: router
      quotas: quota
      security-groups: security-group
      security-group-rules: rule
      subnets: subnet
      vips: vip
    service_endpoints:
      network: service/network
  neutron_sudoers: |
    # This sudoers file supports rootwrap for both Kolla and LOCI Images.
    Defaults !requiretty
    Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
    neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *
  rootwrap: |
    # Configuration for neutron-rootwrap
    # This file should be owned by (and only-writeable by) the root user

    [DEFAULT]
    # List of directories to load filter definitions from (separated by ',').
    # These directories MUST all be only writeable by root !
    filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap

    # List of directories to search executables in, in case filters do not
    # explicitly specify a full path (separated by ',')
    # If not specified, defaults to system PATH environment variable.
    # These directories MUST all be only writeable by root !
    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin

    # Enable logging to syslog
    # Default value is False
    use_syslog=False

    # Which syslog facility to use.
    # Valid values include auth, authpriv, syslog, local0, local1...
    # Default value is 'syslog'
    syslog_log_facility=syslog

    # Which messages to log.
    # INFO means log all usage
    # ERROR means only log unsuccessful attempts
    syslog_log_level=ERROR

    [xenapi]
    # XenAPI configuration is only required by the L2 agent if it is to
    # target a XenServer/XCP compute host's dom0.
    xenapi_connection_url=<None>
    xenapi_connection_username=root
    xenapi_connection_password=<None>
  rootwrap_filters:
    debug:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # This is needed because we should ping
        # from inside a namespace which requires root
        # _alt variants allow to match -c and -w in any order
        #   (used by NeutronDebugAgent.ping_all)
        ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
        ping_alt: RegExpFilter, ping, root, ping, -c, \d+, -w, \d+, [0-9\.]+
        ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+
        ping6_alt: RegExpFilter, ping6, root, ping6, -c, \d+, -w, \d+, [0-9A-Fa-f:]+
    dibbler:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # Filters for the dibbler-based reference implementation of the pluggable
        # Prefix Delegation driver. Other implementations using an alternative agent
        # should include a similar filter in this folder.

        # prefix_delegation_agent
        dibbler-client: CommandFilter, dibbler-client, root
    ipset_firewall:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]
        # neutron/agent/linux/iptables_firewall.py
        #   "ipset", "-A", ...
        ipset: CommandFilter, ipset, root
    l3:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # arping
        arping: CommandFilter, arping, root

        # l3_agent
        sysctl: CommandFilter, sysctl, root
        route: CommandFilter, route, root
        radvd: CommandFilter, radvd, root

        # haproxy
        haproxy: RegExpFilter, haproxy, root, haproxy, -f, .*
        kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP

        # metadata proxy
        metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
        # RHEL invocation of the metadata proxy will report /usr/bin/python
        kill_metadata: KillFilter, root, python, -15, -9
        kill_metadata2: KillFilter, root, python2, -15, -9
        kill_metadata7: KillFilter, root, python2.7, -15, -9
        kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP
        kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root

        # l3_tc_lib
        l3_tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
        l3_tc_add_qdisc_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress
        l3_tc_add_qdisc_egress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, root, handle, 1:, htb
        l3_tc_show_filters: RegExpFilter, tc, root, tc, -p, -s, -d, filter, show, dev, .+, parent, .+, prio, 1
        l3_tc_delete_filters: RegExpFilter, tc, root, tc, filter, del, dev, .+, parent, .+, prio, 1, handle, .+, u32
        l3_tc_add_filter_ingress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, dst, .+, police, rate, .+, burst, .+, drop, flowid, :1
        l3_tc_add_filter_egress:  RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, src, .+, police, rate, .+, burst, .+, drop, flowid, :1

        # For ip monitor
        kill_ip_monitor: KillFilter, root, ip, -9

        # ovs_lib (if OVSInterfaceDriver is used)
        ovs-vsctl: CommandFilter, ovs-vsctl, root

        # iptables_manager
        iptables-save: CommandFilter, iptables-save, root
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-save: CommandFilter, ip6tables-save, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # Keepalived
        keepalived: CommandFilter, keepalived, root
        kill_keepalived: KillFilter, root, /usr/sbin/keepalived, -HUP, -15, -9

        # l3 agent to delete floatingip's conntrack state
        conntrack: CommandFilter, conntrack, root

        # keepalived state change monitor
        keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root
    netns_cleanup:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # netns-cleanup
        netstat: CommandFilter, netstat, root
    dhcp:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # dhcp-agent
        dnsmasq: CommandFilter, dnsmasq, root
        # dhcp-agent uses kill as well, that's handled by the generic KillFilter
        # it looks like these are the only signals needed, per
        # neutron/agent/linux/dhcp.py
        kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP, -15
        kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP, -15

        ovs-vsctl: CommandFilter, ovs-vsctl, root
        ivs-ctl: CommandFilter, ivs-ctl, root
        mm-ctl: CommandFilter, mm-ctl, root
        dhcp_release: CommandFilter, dhcp_release, root
        dhcp_release6: CommandFilter, dhcp_release6, root

        # metadata proxy
        metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
        # RHEL invocation of the metadata proxy will report /usr/bin/python
        kill_metadata: KillFilter, root, python, -9
        kill_metadata2: KillFilter, root, python2, -9
        kill_metadata7: KillFilter, root, python2.7, -9

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root
    ebtables:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        ebtables: CommandFilter, ebtables, root
    iptables_firewall:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # neutron/agent/linux/iptables_firewall.py
        #   "iptables-save", ...
        iptables-save: CommandFilter, iptables-save, root
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-save: CommandFilter, ip6tables-save, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # neutron/agent/linux/iptables_firewall.py
        #   "iptables", "-A", ...
        iptables: CommandFilter, iptables, root
        ip6tables: CommandFilter, ip6tables, root

        # neutron/agent/linux/iptables_firewall.py
        sysctl: CommandFilter, sysctl, root

        # neutron/agent/linux/ip_conntrack.py
        conntrack: CommandFilter, conntrack, root
    linuxbridge_plugin:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # linuxbridge-agent
        # unclear whether both variants are necessary, but I'm transliterating
        # from the old mechanism
        brctl: CommandFilter, brctl, root
        bridge: CommandFilter, bridge, root

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root

        # tc commands needed for QoS support
        tc_replace_tbf: RegExpFilter, tc, root, tc, qdisc, replace, dev, .+, root, tbf, rate, .+, latency, .+, burst, .+
        tc_add_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress, handle, .+
        tc_delete: RegExpFilter, tc, root, tc, qdisc, del, dev, .+, .+
        tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+
        tc_show_filters: RegExpFilter, tc, root, tc, filter, show, dev, .+, parent, .+
        tc_add_filter: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, all, prio, .+, basic, police, rate, .+, burst, .+, mtu, .+, drop
    openvswitch_plugin:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron is
        # expected to control network
        #
        # This file should be owned by (and only-writeable by) the root user

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # openvswitch-agent
        # unclear whether both variants are necessary, but I'm transliterating
        # from the old mechanism
        ovs-vsctl: CommandFilter, ovs-vsctl, root
        # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl
        ovs-ofctl: CommandFilter, ovs-ofctl, root
        kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
        ovsdb-client: CommandFilter, ovsdb-client, root
        xe: CommandFilter, xe, root

        # ip_lib
        ip: IpFilter, ip, root
        find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
        ip_exec: IpNetnsExecFilter, ip, root

        # needed for FDB extension
        bridge: CommandFilter, bridge, root
    privsep:
      pods:
        - dhcp_agent
        - l3_agent
        - lb_agent
        - metadata_agent
        - ovs_agent
        - sriov_agent
      content: |
        # Command filters to allow privsep daemon to be started via rootwrap.
        #
        # This file should be owned by (and only-writeable by) the root user

        [Filters]

        # By installing the following, the local admin is asserting that:
        #
        # 1. The python module load path used by privsep-helper
        #    command as root (as started by sudo/rootwrap) is trusted.
        # 2. Any oslo.config files matching the --config-file
        #    arguments below are trusted.
        # 3. Users allowed to run sudo/rootwrap with this configuration(*) are
        #    also allowed to invoke python "entrypoint" functions from
        #    --privsep_context with the additional (possibly root) privileges
        #    configured for that context.
        #
        # (*) ie: the user is allowed by /etc/sudoers to run rootwrap as root
        #
        # In particular, the oslo.config and python module path must not
        # be writeable by the unprivileged user.

        # oslo.privsep default neutron context
        privsep: PathFilter, privsep-helper, root,
         --config-file, /etc,
         --privsep_context, neutron.privileged.default,
         --privsep_sock_path, /

        # NOTE: A second `--config-file` arg can also be added above, since
        # many neutron components are installed like that (eg: by devstack).
        # Adjust to suit local requirements.
    taas:
      pods:
        - ovs_agent
        - sriov_agent
      content: |
        # neutron-rootwrap command filters for nodes on which neutron
        # tap-as-a-service (TaaS) is enabled. TaaS uses this command
        # as part of its flow control.

        # format seems to be
        # cmd-name: filter-name, raw-command, user, args

        [Filters]

        # This is needed to allow TaaS to insert/remove a VLAN id on the
        # target VF under /sys/class/net/[device-name]/device/sriov/[vf-index]/[mirror]
        i40e_sysfs_command: RegExpFilter, i40e_sysfs_command, root, i40e_sysfs_command, \w+, .+, .+
  neutron:
    DEFAULT:
      log_config_append: /etc/neutron/logging.conf
      #NOTE(portdirect): the bind port should not be defined, and is manipulated
      # via the endpoints section.
      bind_port: null
      default_availability_zones: nova
      api_workers: 1
      rpc_workers: 4
      allow_overlapping_ips: True
      # core_plugin can be: ml2, calico
      core_plugin: ml2
      # service_plugin can be: router, odl-router, empty for calico,
      # networking_ovn.l3.l3_ovn.OVNL3RouterPlugin for OVN
      service_plugins: router
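      # As an illustrative (untested) override for a Calico-backed deployment,
      # these two values would typically become:
      # core_plugin: calico
      # service_plugins: ""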
      allow_automatic_l3agent_failover: True
      l3_ha: True
      max_l3_agents_per_router: 2
      l3_ha_network_type: vxlan
      network_auto_schedule: True
      router_auto_schedule: True
      # NOTE(portdirect): if unset, this is populated dynamically from the value
      # in 'network.backend' with sane defaults.
      interface_driver: null
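      # For example, to force a specific driver instead of deriving it from
      # 'network.backend' (illustrative):
      # interface_driver: openvswitch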
    oslo_concurrency:
      lock_path: /var/lib/neutron/tmp
    database:
      max_retries: -1
    agent:
      root_helper: sudo /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
    oslo_messaging_notifications:
      driver: messagingv2
    oslo_messaging_rabbit:
      rabbit_ha_queues: true
    oslo_middleware:
      enable_proxy_headers_parsing: true
    nova:
      auth_type: password
      auth_version: v3
      endpoint_type: internal
    keystone_authtoken:
      memcache_security_strategy: ENCRYPT
      auth_type: password
      auth_version: v3
  logging:
    loggers:
      keys:
        - root
        - neutron
        - neutron_taas
    handlers:
      keys:
        - stdout
        - stderr
        - "null"
    formatters:
      keys:
        - context
        - default
    logger_root:
      level: WARNING
      handlers: stdout
    logger_neutron:
      level: INFO
      handlers:
        - stdout
      qualname: neutron
    logger_neutron_taas:
      level: INFO
      handlers:
        - stdout
      qualname: neutron_taas
    logger_amqp:
      level: WARNING
      handlers: stderr
      qualname: amqp
    logger_amqplib:
      level: WARNING
      handlers: stderr
      qualname: amqplib
    logger_eventletwsgi:
      level: WARNING
      handlers: stderr
      qualname: eventlet.wsgi.server
    logger_sqlalchemy:
      level: WARNING
      handlers: stderr
      qualname: sqlalchemy
    logger_boto:
      level: WARNING
      handlers: stderr
      qualname: boto
    handler_null:
      class: logging.NullHandler
      formatter: default
      args: ()
    handler_stdout:
      class: StreamHandler
      args: (sys.stdout,)
      formatter: context
    handler_stderr:
      class: StreamHandler
      args: (sys.stderr,)
      formatter: context
    formatter_context:
      class: oslo_log.formatters.ContextFormatter
      datefmt: "%Y-%m-%d %H:%M:%S"
    formatter_default:
      format: "%(message)s"
      datefmt: "%Y-%m-%d %H:%M:%S"
  plugins:
    ml2_conf:
      ml2:
        extension_drivers: port_security
        # NOTE(portdirect): if unset, this is populated dynamically from the
        # value in 'network.backend' with sane defaults.
        mechanism_drivers: null
        type_drivers: flat,vlan,vxlan
        tenant_network_types: vxlan
      ml2_type_vxlan:
        vni_ranges: 1:1000
        vxlan_group: 239.1.1.1
      ml2_type_flat:
        flat_networks: "*"
      # If you want to use the external network as a tagged provider network,
      # specify a range that includes the intended VLAN target
      # using ml2_type_vlan.network_vlan_ranges:
      # ml2_type_vlan:
      #   network_vlan_ranges: "external:1100:1110"
      agent:
        extensions: ""
    ml2_conf_sriov: null
    taas:
      taas:
        enabled: False
    openvswitch_agent:
      agent:
        tunnel_types: vxlan
        l2_population: True
        arp_responder: True
      ovs:
        bridge_mappings: "external:br-ex"
      securitygroup:
        firewall_driver: neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
    linuxbridge_agent:
      linux_bridge:
        # To define flat and VLAN connections, in LinuxBridge we can assign
        # a specific interface to the flat/vlan network name using:
        # physical_interface_mappings: "external:eth3"
        # Or we can set the mapping between the network and a bridge:
        bridge_mappings: "external:br-ex"
        # The two options above are mutually exclusive; do not use both at once
      securitygroup:
        firewall_driver: iptables
      vxlan:
        l2_population: True
        arp_responder: True
    macvtap_agent: null
    sriov_agent:
      securitygroup:
        firewall_driver: neutron.agent.firewall.NoopFirewallDriver
      sriov_nic:
        physical_device_mappings: physnet2:enp3s0f1
        # NOTE: do not use null here, use an empty string
        exclude_devices: ""
  dhcp_agent:
    DEFAULT:
      # NOTE(portdirect): if unset, this is populated dynamically from the value
      # in 'network.backend' with sane defaults.
      interface_driver: null
      dnsmasq_config_file: /etc/neutron/dnsmasq.conf
      force_metadata: True
  l3_agent:
    DEFAULT:
      # NOTE(portdirect): if unset, this is populated dynamically from the value
      # in 'network.backend' with sane defaults.
      interface_driver: null
      agent_mode: legacy
  metering_agent: null
  metadata_agent:
    DEFAULT:
      # we cannot change the proxy socket path as it is declared
      # as a hostPath volume from agent daemonsets
      metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy
      metadata_proxy_shared_secret: "password"
    cache:
      enabled: true
      backend: dogpile.cache.memcached

  rabbitmq:
    # NOTE(rk760n): add an RMQ policy to mirror messages from notification queues and set an expiration time for them
    policies:
      - vhost: "neutron"
        name: "ha_ttl_neutron"
        definition:
          # mirror messages to other nodes in the RMQ cluster
          ha-mode: "all"
          ha-sync-mode: "automatic"
          #70s
          message-ttl: 70000
        priority: 0
        apply-to: all
        pattern: '(notifications)\.'
  ## NOTE: "besteffort" is meant for dev env with mixed compute type only.
  ##       This helps prevent sriov init script from failing due to mis-matched NIC
  ##       For prod env, target NIC should match and init script should fail otherwise.
  ## sriov_init:
  ##   - besteffort
  sriov_init:
    -
  # auto_bridge_add is a table of "bridge: interface" pairs.
  # To automatically add physical interfaces to specific bridges,
  # for example eth3 to bridge br-physnet1, if0 to br0 and iface_two
  # to br1, do something like:
  #
  # auto_bridge_add:
  #   br-physnet1: eth3
  #   br0: if0
  #   br1: iface_two
  # br-ex will be added by default
  auto_bridge_add:
    br-ex: null

# Names of secrets used by bootstrap and environmental checks
secrets:
  identity:
    admin: neutron-keystone-admin
    neutron: neutron-keystone-user
    test: neutron-keystone-test
  oslo_db:
    admin: neutron-db-admin
    neutron: neutron-db-user
  oslo_messaging:
    admin: neutron-rabbitmq-admin
    neutron: neutron-rabbitmq-user
  tls:
    network:
      server:
        public: neutron-tls-public

# typically overridden by environmental
# values, but should include all endpoints
# required by this chart
endpoints:
  cluster_domain_suffix: cluster.local
  local_image_registry:
    name: docker-registry
    namespace: docker-registry
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    host_fqdn_override:
      default: null
    port:
      registry:
        node: 5000
  oslo_db:
    auth:
      admin:
        username: root
        password: password
      neutron:
        username: neutron
        password: password
    hosts:
      default: mariadb
    host_fqdn_override:
      default: null
    path: /neutron
    scheme: mysql+pymysql
    port:
      mysql:
        default: 3306
  oslo_messaging:
    auth:
      admin:
        username: rabbitmq
        password: password
      neutron:
        username: neutron
        password: password
    hosts:
      default: rabbitmq
    host_fqdn_override:
      default: null
    path: /neutron
    scheme: rabbit
    port:
      amqp:
        default: 5672
      http:
        default: 15672
  oslo_cache:
    auth:
      # NOTE(portdirect): this defines the keystone authtoken cache
      # encryption key. If not set, it will be populated automatically
      # with a random value, but to take advantage of this feature all
      # services should be set to use the same key and memcached service.
      memcache_secret_key: null
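      # For example, to share the key across charts (the value shown is a
      # placeholder, not a recommendation):
      # memcache_secret_key: "shared-memcache-secret"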
    hosts:
      default: memcached
    host_fqdn_override:
      default: null
    port:
      memcache:
        default: 11211
  compute:
    name: nova
    hosts:
      default: nova-api
      public: nova
    host_fqdn_override:
      default: null
    path:
      default: "/v2.1/%(tenant_id)s"
    scheme:
      default: 'http'
    port:
      api:
        default: 8774
        public: 80
      novncproxy:
        default: 6080
  compute_metadata:
    name: nova
    hosts:
      default: nova-metadata
      public: metadata
    host_fqdn_override:
      default: null
    path:
      default: /
    scheme:
      default: 'http'
    port:
      metadata:
        default: 8775
        public: 80
  identity:
    name: keystone
    auth:
      admin:
        region_name: RegionOne
        username: admin
        password: password
        project_name: admin
        user_domain_name: default
        project_domain_name: default
      neutron:
        role: admin
        region_name: RegionOne
        username: neutron
        password: password
        project_name: service
        user_domain_name: service
        project_domain_name: service
      nova:
        region_name: RegionOne
        project_name: service
        username: nova
        password: password
        user_domain_name: service
        project_domain_name: service
      test:
        role: admin
        region_name: RegionOne
        username: test
        password: password
        project_name: test
        user_domain_name: service
        project_domain_name: service
    hosts:
      default: keystone
      internal: keystone-api
    host_fqdn_override:
      default: null
    path:
      default: /v3
    scheme:
      default: http
    port:
      api:
        default: 80
        internal: 5000
  network:
    name: neutron
    hosts:
      default: neutron-server
      public: neutron
    host_fqdn_override:
      default: null
      # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
      # endpoints using the following format:
      # public:
      #   host: null
      #   tls:
      #     crt: null
      #     key: null
    path:
      default: null
    scheme:
      default: 'http'
    port:
      api:
        default: 9696
        public: 80
  fluentd:
    namespace: osh-infra
    name: fluentd
    hosts:
      default: fluentd-logging
    host_fqdn_override:
      default: null
    path:
      default: null
    scheme: 'http'
    port:
      service:
        default: 24224
      metrics:
        default: 24220
  #NOTE(tp6510): these endpoints allow for things like DNS lookups and apiserver access.
  # They are used to enable the Egress K8s network policy.
  k8s:
    port:
      api:
        default: 6443
        internal: 5000
  default:
    namespace: default
  kube_system:
    namespace: kube-system
  kube_public:
    namespace: kube-public

network_policy:
  neutron:
    # TODO(lamt): Need to tighten this ingress for security.
    ingress:
      - {}
    egress:
      - {}
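    # As an illustrative sketch (the namespace label is an assumption; adjust
    # selectors to the deployment), ingress could be restricted to pods in a
    # labelled namespace:
    # ingress:
    #   - from:
    #       - namespaceSelector:
    #           matchLabels:
    #             name: openstack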

manifests:
  configmap_bin: true
  configmap_etc: true
  daemonset_dhcp_agent: true
  daemonset_l3_agent: true
  daemonset_lb_agent: true
  daemonset_metadata_agent: true
  daemonset_ovs_agent: true
  daemonset_sriov_agent: true
  deployment_server: true
  ingress_server: true
  job_bootstrap: true
  job_db_init: true
  job_db_sync: true
  job_db_drop: false
  job_image_repo_sync: true
  job_ks_endpoints: true
  job_ks_service: true
  job_ks_user: true
  job_rabbit_init: true
  pdb_server: true
  pod_rally_test: true
  network_policy: false
  secret_db: true
  secret_ingress_tls: true
  secret_keystone: true
  secret_rabbitmq: true
  service_ingress_server: true
  service_server: true