# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default values for nova.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
---
release_group: null
labels:
  agent:
    compute:
      node_selector_key: openstack-compute-node
      node_selector_value: enabled
    compute_ironic:
      node_selector_key: openstack-compute-node
      node_selector_value: enabled
  api_metadata:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  conductor:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  novncproxy:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  osapi:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  scheduler:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  serialproxy:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  spiceproxy:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
images:
  pull_policy: IfNotPresent
  tags:
    bootstrap: quay.io/airshipit/heat:2024.1-ubuntu_jammy
    db_drop: quay.io/airshipit/heat:2024.1-ubuntu_jammy
    db_init: quay.io/airshipit/heat:2024.1-ubuntu_jammy
    dep_check: 'quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_focal'
    rabbit_init: docker.io/rabbitmq:3.13-management
    ks_user: quay.io/airshipit/heat:2024.1-ubuntu_jammy
    ks_service: quay.io/airshipit/heat:2024.1-ubuntu_jammy
    nova_archive_deleted_rows: quay.io/airshipit/nova:2024.1-ubuntu_jammy
    ks_endpoints: quay.io/airshipit/heat:2024.1-ubuntu_jammy
    nova_api: quay.io/airshipit/nova:2024.1-ubuntu_jammy
    nova_cell_setup: quay.io/airshipit/nova:2024.1-ubuntu_jammy
    nova_cell_setup_init: quay.io/airshipit/heat:2024.1-ubuntu_jammy
    nova_compute: quay.io/airshipit/nova:2024.1-ubuntu_jammy
    nova_compute_ironic: 'docker.io/kolla/ubuntu-source-nova-compute-ironic:wallaby'
    nova_compute_ssh: quay.io/airshipit/nova:2024.1-ubuntu_jammy
    nova_conductor: quay.io/airshipit/nova:2024.1-ubuntu_jammy
    nova_db_sync: quay.io/airshipit/nova:2024.1-ubuntu_jammy
    nova_novncproxy: quay.io/airshipit/nova:2024.1-ubuntu_jammy
    nova_novncproxy_assets: 'docker.io/kolla/ubuntu-source-nova-novncproxy:wallaby'
    nova_scheduler: quay.io/airshipit/nova:2024.1-ubuntu_jammy
    nova_storage_init: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_jammy'
    # NOTE(portdirect): we simply use the ceph config helper here,
    # as it has both oscli and jq.
    nova_service_cleaner: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_jammy'
    nova_serialproxy: quay.io/airshipit/nova:2024.1-ubuntu_jammy
    nova_spiceproxy: quay.io/airshipit/nova:2024.1-ubuntu_jammy
    nova_spiceproxy_assets: quay.io/airshipit/nova:2024.1-ubuntu_jammy
    test: docker.io/xrally/xrally-openstack:2.0.0
    image_repo_sync: docker.io/docker:17.07.0
    nova_wait_for_computes_init: gcr.io/google_containers/hyperkube-amd64:v1.11.6
  local_registry:
    active: false
    exclude:
      - dep_check
      - image_repo_sync
jobs:
  # NOTE(portdirect): When using cells new nodes will be added to the cell on the hour by default.
  # TODO(portdirect): Add a post-start action to nova compute pods that registers themselves.
  cell_setup:
    cron: "0 */1 * * *"
    starting_deadline: 600
    history:
      success: 3
      failed: 1
    extended_wait:
      enabled: false
      iteration: 3
      duration: 5
    extra_command: null
  service_cleaner:
    cron: "0 */1 * * *"
    starting_deadline: 600
    history:
      success: 3
      failed: 1
    sleep_time: 60
    extra_command: null
  archive_deleted_rows:
    cron: "0 */1 * * *"
    starting_deadline: 600
    history:
      success: 3
      failed: 1
bootstrap:
  enabled: true
  ks_user: admin
  script: null
  structured:
    flavors:
      enabled: true
      options:
        m1_tiny:
          name: "m1.tiny"
          ram: 512
          disk: 1
          vcpus: 1
        m1_small:
          name: "m1.small"
          ram: 2048
          disk: 20
          vcpus: 1
        m1_medium:
          name: "m1.medium"
          ram: 4096
          disk: 40
          vcpus: 2
        m1_large:
          name: "m1.large"
          ram: 8192
          disk: 80
          vcpus: 4
        m1_xlarge:
          name: "m1.xlarge"
          ram: 16384
          disk: 160
          vcpus: 8
  wait_for_computes:
    enabled: false
    # Wait percentage is the minimum percentage of compute hypervisors which
    # must be available before the remainder of the bootstrap script can be run.
    wait_percentage: 70
    # Once the wait_percentage above is achieved, the remaining_wait is the
    # amount of time in seconds to wait before executing the remainder of the
    # boostrap script.
    remaining_wait: 300
    scripts:
      init_script: |
        # This runs in a bootstrap init container. It counts the number of compute nodes.
        COMPUTE_NODES=$(kubectl get nodes -o custom-columns=NAME:.metadata.name -l openstack-compute-node=enabled --no-headers | sort)
        /bin/echo $COMPUTE_NODES > /tmp/compute_nodes.txt
      wait_script: |
        # This script runs in the main bootstrap container just before the
        # bootstrap.script is called.
        COMPUTE_HOSTS=`cat /tmp/compute_nodes.txt | wc -w`
        if [[ $COMPUTE_HOSTS == 0 ]]; then
          echo "There are no compute hosts found!"
          exit 1
        fi
        # Wait for all hypervisors to come up before moving on with the deployment
        HYPERVISOR_WAIT=true
        WAIT_AFTER_READY=0
        SLEEP=5
        while [[ $HYPERVISOR_WAIT == true ]]; do
          # Its possible that openstack command may fail due to not being able to
          # reach the compute service
          set +e
          HYPERVISORS=$(openstack hypervisor list -f value -c 'Hypervisor Hostname' | wc -w)
          set -e
          PERCENT_READY=$(( $HYPERVISORS * 100 / $COMPUTE_HOSTS ))
          if [[ $PERCENT_READY -ge $WAIT_PERCENTAGE ]]; then
            echo "Hypervisor ready percentage is $PERCENT_READY"
            if [[ $PERCENT_READY == 100 ]]; then
              HYPERVISOR_WAIT=false
              echo "All hypervisors are ready."
            elif [[ WAIT_AFTER_READY -ge $REMAINING_WAIT ]]; then
              HYPERVISOR_WAIT=false
              echo "Waited the configured time -- $HYPERVISORS out of $COMPUTE_HOSTS hypervisor(s) ready -- proceeding with the bootstrap."
            else
              sleep $SLEEP
              WAIT_AFTER_READY=$(( $WAIT_AFTER_READY + $SLEEP ))
            fi
          else
            echo "Waiting $SLEEP seconds for enough hypervisors to be discovered..."
            sleep $SLEEP
          fi
        done
network:
  # provide what type of network wiring will be used
  # possible options: openvswitch, linuxbridge, sriov
  backend:
    - openvswitch
  osapi:
    port: 8774
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30774
  metadata:
    port: 8775
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    external_policy_local: false
    node_port:
      enabled: false
      port: 30775
  novncproxy:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    node_port:
      enabled: false
      port: 30680
  serialproxy:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    node_port:
      enabled: false
      port: 30683
  spiceproxy:
    ingress:
      public: true
      classes:
        namespace: "nginx"
        cluster: "nginx-cluster"
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
    node_port:
      enabled: false
      port: 30682
  ssh:
    enabled: false
    port: 8022
    from_subnet: 0.0.0.0/0
    key_types:
      - rsa
      - dsa
      - ecdsa
      - ed25519
    private_key: 'null'
    public_key: 'null'
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
          - nova-image-repo-sync
        services:
          - endpoint: node
            service: local_image_registry
    targeted:
      ovn:
        compute:
          pod:
            - requireSameNode: true
              labels:
                application: ovn
                component: ovn-controller
      openvswitch:
        compute:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-ovs-agent
      linuxbridge:
        compute:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-lb-agent
      sriov:
        compute:
          pod:
            - requireSameNode: true
              labels:
                application: neutron
                component: neutron-sriov-agent
  static:
    api:
      jobs:
        - nova-db-sync
        - nova-ks-user
        - nova-ks-endpoints
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
    api_metadata:
      jobs:
        - nova-db-sync
        - nova-ks-user
        - nova-ks-endpoints
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
    bootstrap:
      services:
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: compute
    cell_setup:
      jobs:
        - nova-db-sync
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: compute
      pod:
        - requireSameNode: false
          labels:
            application: nova
            component: compute
    service_cleaner:
      jobs:
        - nova-db-sync
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: compute
    compute:
      pod:
        - requireSameNode: true
          labels:
            application: libvirt
            component: libvirt
      jobs:
        - nova-db-sync
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: image
        - endpoint: internal
          service: compute
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute_metadata
    compute_ironic:
      jobs:
        - nova-db-sync
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: image
        - endpoint: internal
          service: compute
        - endpoint: internal
          service: network
        - endpoint: internal
          service: baremetal
    conductor:
      jobs:
        - nova-db-sync
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: compute
    db_drop:
      services:
        - endpoint: internal
          service: oslo_db
    archive_deleted_rows:
      jobs:
        - nova-db-init
        - nova-db-sync
    db_init:
      services:
        - endpoint: internal
          service: oslo_db
    db_sync:
      jobs:
        - nova-db-init
      services:
        - endpoint: internal
          service: oslo_db
    ks_endpoints:
      jobs:
        - nova-ks-service
      services:
        - endpoint: internal
          service: identity
    ks_service:
      services:
        - endpoint: internal
          service: identity
    ks_user:
      services:
        - endpoint: internal
          service: identity
    rabbit_init:
      services:
        - service: oslo_messaging
          endpoint: internal
    novncproxy:
      jobs:
        - nova-db-sync
      services:
        - endpoint: internal
          service: oslo_db
    serialproxy:
      jobs:
        - nova-db-sync
      services:
        - endpoint: internal
          service: oslo_db
    spiceproxy:
      jobs:
        - nova-db-sync
      services:
        - endpoint: internal
          service: oslo_db
    scheduler:
      jobs:
        - nova-db-sync
        - nova-rabbit-init
      services:
        - endpoint: internal
          service: oslo_messaging
        - endpoint: internal
          service: oslo_db
        - endpoint: internal
          service: identity
        - endpoint: internal
          service: compute
    tests:
      services:
        - endpoint: internal
          service: image
        - endpoint: internal
          service: compute
        - endpoint: internal
          service: network
        - endpoint: internal
          service: compute_metadata
    image_repo_sync:
      services:
        - endpoint: internal
          service: local_image_registry
console:
  # serial | spice | novnc | none
  console_kind: novnc
  serial:
    compute:
      # IF blank, search default routing interface
      server_proxyclient_interface: null
      # or set network cidr
      server_proxyclient_network_cidr: 0/0
    proxy:
      # IF blank, search default routing interface
      server_proxyclient_interface: null
      # or set network cidr
      server_proxyclient_network_cidr: 0/0
  spice:
    compute:
      # IF blank, search default routing interface
      server_proxyclient_interface: null
      # or set network cidr
      server_proxyclient_network_cidr: 0/0
    proxy:
      # IF blank, search default routing interface
      server_proxyclient_interface: null
      # or set network cidr
      server_proxyclient_network_cidr: 0/0
  novnc:
    compute:
      # IF blank, search default routing interface
      vncserver_proxyclient_interface: null
      # or set network cidr
      vncserver_proxyclient_network_cidr: 0/0
    vncproxy:
      # IF blank, search default routing interface
      vncserver_proxyclient_interface: null
      # or set network cidr
      vncserver_proxyclient_network_cidr: 0/0
  address_search_enabled: true
ceph_client:
  configmap: ceph-etc
  user_secret_name: pvc-ceph-client-key
  rbd_pool:
    app_name: nova-vms
    replication: 3
    crush_rule: replicated_rule
    chunk_size: 8
conf:
  security: |
    #
    # Disable access to the entire file system except for the directories that
    # are explicitly allowed later.
    #
    # This currently breaks the configurations that come with some web application
    # Debian packages.
    #
    #<Directory />
    #   AllowOverride None
    #   Require all denied
    #</Directory>
    # Changing the following options will not really affect the security of the
    # server, but might make attacks slightly more difficult in some cases.
    #
    # ServerTokens
    # This directive configures what you return as the Server HTTP response
    # Header. The default is 'Full' which sends information about the OS-Type
    # and compiled in modules.
    # Set to one of: Full | OS | Minimal | Minor | Major | Prod
    # where Full conveys the most information, and Prod the least.
    ServerTokens Prod
    #
    # Optionally add a line containing the server version and virtual host
    # name to server-generated pages (internal error documents, FTP directory
    # listings, mod_status and mod_info output etc., but not CGI generated
    # documents or custom error documents).
    # Set to "EMail" to also include a mailto: link to the ServerAdmin.
    # Set to one of: On | Off | EMail
    ServerSignature Off
    #
    # Allow TRACE method
    #
    # Set to "extended" to also reflect the request body (only for testing and
    # diagnostic purposes).
    #
    # Set to one of: On | Off | extended
    TraceEnable Off
    #
    # Forbid access to version control directories
    #
    # If you use version control systems in your document root, you should
    # probably deny access to their directories. For example, for subversion:
    #
    #<DirectoryMatch "/\.svn">
    #   Require all denied
    #</DirectoryMatch>
    #
    # Setting this header will prevent MSIE from interpreting files as something
    # else than declared by the content type in the HTTP headers.
    # Requires mod_headers to be enabled.
    #
    #Header set X-Content-Type-Options: "nosniff"
    #
    # Setting this header will prevent other sites from embedding pages from this
    # site as frames. This defends against clickjacking attacks.
    # Requires mod_headers to be enabled.
    #
    #Header set X-Frame-Options: "sameorigin"
  software:
    apache2:
      binary: apache2
      start_parameters: -DFOREGROUND
      conf_dir: /etc/apache2/conf-enabled
      site_dir: /etc/apache2/sites-enable
      mods_dir: /etc/apache2/mods-available
      a2enmod: null
      a2dismod: null
  ceph:
    enabled: true
    admin_keyring: null
    cinder:
      user: "cinder"
      keyring: null
      secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
  rally_tests:
    run_tempest: false
    clean_up: |
      FLAVORS=$(openstack flavor list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
      if [ -n "$FLAVORS" ]; then
        echo $FLAVORS | xargs openstack flavor delete
      fi
      SERVERS=$(openstack server list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
      if [ -n "$SERVERS" ]; then
        echo $SERVERS | xargs openstack server delete
      fi
      IMAGES=$(openstack image list -f value | awk '$2 ~ /^c_rally_/ { print $1 }')
      if [ -n "$IMAGES" ]; then
        echo $IMAGES | xargs openstack image delete
      fi
    tests:
      NovaAggregates.create_and_get_aggregate_details:
        - args:
            availability_zone: nova
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaAggregates.create_and_update_aggregate:
        - args:
            availability_zone: nova
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaAggregates.list_aggregates:
        - runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaAvailabilityZones.list_availability_zones:
        - args:
            detailed: true
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaFlavors.create_and_delete_flavor:
        - args:
            disk: 1
            ram: 500
            vcpus: 1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaFlavors.create_and_list_flavor_access:
        - args:
            disk: 1
            ram: 500
            vcpus: 1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaFlavors.create_flavor:
        - args:
            disk: 1
            ram: 500
            vcpus: 1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaFlavors.create_flavor_and_add_tenant_access:
        - args:
            disk: 1
            ram: 500
            vcpus: 1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaFlavors.create_flavor_and_set_keys:
        - args:
            disk: 1
            extra_specs:
              'quota:disk_read_bytes_sec': 10240
            ram: 500
            vcpus: 1
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaFlavors.list_flavors:
        - args:
            detailed: true
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaHypervisors.list_and_get_hypervisors:
        - args:
            detailed: true
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaHypervisors.list_and_get_uptime_hypervisors:
        - args:
            detailed: true
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaHypervisors.list_and_search_hypervisors:
        - args:
            detailed: true
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaHypervisors.list_hypervisors:
        - args:
            detailed: true
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaHypervisors.statistics_hypervisors:
        - args: {}
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaKeypair.create_and_delete_keypair:
        - runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaKeypair.create_and_list_keypairs:
        - runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaServerGroups.create_and_list_server_groups:
        - args:
            all_projects: false
            kwargs:
              policies:
                - affinity
          runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
      NovaServices.list_services:
        - runner:
            concurrency: 1
            times: 1
            type: constant
          sla:
            failure_rate:
              max: 0
  paste:
    composite:metadata:
      use: egg:Paste#urlmap
      /: meta
    pipeline:meta:
      pipeline: cors metaapp
    app:metaapp:
      paste.app_factory: nova.api.metadata.handler:MetadataRequestHandler.factory
    composite:osapi_compute:
      use: call:nova.api.openstack.urlmap:urlmap_factory
      /: oscomputeversions
      /v2: openstack_compute_api_v21_legacy_v2_compatible
      /v2.1: openstack_compute_api_v21
    composite:openstack_compute_api_v21:
      use: call:nova.api.auth:pipeline_factory_v21
      noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
      keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext osapi_compute_app_v21
    composite:openstack_compute_api_v21_legacy_v2_compatible:
      use: call:nova.api.auth:pipeline_factory_v21
      noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21
      keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext legacy_v2_compatible osapi_compute_app_v21
    filter:request_id:
      paste.filter_factory: oslo_middleware:RequestId.factory
    filter:compute_req_id:
      paste.filter_factory: nova.api.compute_req_id:ComputeReqIdMiddleware.factory
    filter:faultwrap:
      paste.filter_factory: nova.api.openstack:FaultWrapper.factory
    filter:noauth2:
      paste.filter_factory: nova.api.openstack.auth:NoAuthMiddleware.factory
    filter:sizelimit:
      paste.filter_factory: oslo_middleware:RequestBodySizeLimiter.factory
    filter:http_proxy_to_wsgi:
      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
    filter:legacy_v2_compatible:
      paste.filter_factory: nova.api.openstack:LegacyV2CompatibleWrapper.factory
    app:osapi_compute_app_v21:
      paste.app_factory: nova.api.openstack.compute:APIRouterV21.factory
    pipeline:oscomputeversions:
      pipeline: faultwrap http_proxy_to_wsgi oscomputeversionapp
    app:oscomputeversionapp:
      paste.app_factory: nova.api.openstack.compute.versions:Versions.factory
    filter:cors:
      paste.filter_factory: oslo_middleware.cors:filter_factory
      oslo_config_project: nova
    filter:keystonecontext:
      paste.filter_factory: nova.api.auth:NovaKeystoneContext.factory
    filter:authtoken:
      paste.filter_factory: keystonemiddleware.auth_token:filter_factory
    filter:audit:
      paste.filter_factory: keystonemiddleware.audit:filter_factory
      audit_map_file: /etc/nova/api_audit_map.conf
  policy: {}
  nova_sudoers: |
    # This sudoers file supports rootwrap for both Kolla and LOCI Images.
    Defaults !requiretty
    Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
nova ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/nova-rootwrap /etc/nova/rootwrap.conf *, /var/lib/openstack/bin/nova-rootwrap /etc/nova/rootwrap.conf *
2019-04-11 13:27:39 -05:00
api_audit_map :
DEFAULT :
target_endpoint_type : None
custom_actions :
enable : enable
disable : disable
delete : delete
startup : start/startup
shutdown : stop/shutdown
reboot : start/reboot
os-migrations/get : read
os-server-password/post : update
path_keywords :
add : None
action : None
enable : None
disable : None
configure-project : None
defaults : None
delete : None
detail : None
diagnostics : None
entries : entry
extensions : alias
flavors : flavor
images : image
ips : label
limits : None
metadata : key
os-agents : os-agent
os-aggregates : os-aggregate
os-availability-zone : None
os-certificates : None
os-cloudpipe : None
os-fixed-ips : ip
os-extra_specs : key
os-flavor-access : None
os-floating-ip-dns : domain
os-floating-ips-bulk : host
os-floating-ip-pools : None
os-floating-ips : floating-ip
os-hosts : host
os-hypervisors : hypervisor
os-instance-actions : instance-action
os-keypairs : keypair
os-migrations : None
os-networks : network
os-quota-sets : tenant
os-security-groups : security_group
os-security-group-rules : rule
os-server-password : None
os-services : None
os-simple-tenant-usage : tenant
os-virtual-interfaces : None
os-volume_attachments : attachment
os-volumes_boot : None
os-volumes : volume
os-volume-types : volume-type
os-snapshots : snapshot
reboot : None
servers : server
shutdown : None
startup : None
statistics : None
service_endpoints :
compute : service/compute
2018-08-01 13:49:06 -05:00
rootwrap : |
# Configuration for nova-rootwrap
# This file should be owned by (and only-writeable by) the root user
[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin
# Enable logging to syslog
# Default value is False
use_syslog=False
# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, local0, local1...
# Default value is 'syslog'
syslog_log_facility=syslog
# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR
2017-08-24 17:23:54 -05:00
rootwrap_filters :
api_metadata :
2018-08-01 13:49:06 -05:00
pods :
- metadata
content : |
# nova-rootwrap command filters for api-metadata nodes
# This is needed on nova-api hosts running with "metadata" in enabled_apis
# or when running nova-api-metadata
# This file should be owned by (and only-writeable by) the root user
[Filters]
# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
iptables-save : CommandFilter, iptables-save, root
ip6tables-save : CommandFilter, ip6tables-save, root
# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
iptables-restore : CommandFilter, iptables-restore, root
ip6tables-restore : CommandFilter, ip6tables-restore, root
2017-08-24 17:23:54 -05:00
compute :
2018-08-01 13:49:06 -05:00
pods :
- compute
content : |
# nova-rootwrap command filters for compute nodes
# This file should be owned by (and only-writeable by) the root user
[Filters]
# nova/virt/disk/mount/api.py: 'kpartx', '-a', device
# nova/virt/disk/mount/api.py: 'kpartx', '-d', device
kpartx : CommandFilter, kpartx, root
# nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path
# nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path
tune2fs : CommandFilter, tune2fs, root
# nova/virt/disk/mount/api.py: 'mount', mapped_device
# nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target
# nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
# nova/virt/configdrive.py: 'mount', device, mountdir
# nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ...
mount : CommandFilter, mount, root
# nova/virt/disk/mount/api.py: 'umount', mapped_device
# nova/virt/disk/api.py: 'umount' target
# nova/virt/xenapi/vm_utils.py: 'umount', dev_path
# nova/virt/configdrive.py: 'umount', mountdir
umount : CommandFilter, umount, root
# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image
# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device
qemu-nbd : CommandFilter, qemu-nbd, root
# nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image
# nova/virt/disk/mount/loop.py: 'losetup', '--detach', device
losetup : CommandFilter, losetup, root
# nova/virt/disk/vfs/localfs.py: 'blkid', '-o', 'value', '-s', 'TYPE', device
blkid : CommandFilter, blkid, root
# nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path
# nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device
blockdev : RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.*
# nova/virt/disk/vfs/localfs.py: 'tee', canonpath
tee : CommandFilter, tee, root
# nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath
mkdir : CommandFilter, mkdir, root
# nova/virt/disk/vfs/localfs.py: 'chown'
# nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
# nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
# nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')
chown : CommandFilter, chown, root
# nova/virt/disk/vfs/localfs.py: 'chmod'
chmod : CommandFilter, chmod, root
# nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
# nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
# nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
# nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
# nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
# nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
# nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
# nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
# nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
# nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
# nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
# nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
# nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
# nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
# nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
# nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
# nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
# nova/network/linux_net.py: 'ip', 'route', 'add', ..
# nova/network/linux_net.py: 'ip', 'route', 'del', .
# nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
ip : CommandFilter, ip, root
# nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
# nova/network/linux_net.py: 'tunctl', '-b', '-t', dev
tunctl : CommandFilter, tunctl, root
# nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
# nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
# nova/network/linux_net.py: 'ovs-vsctl', ....
ovs-vsctl : CommandFilter, ovs-vsctl, root
# nova/virt/libvirt/vif.py: 'vrouter-port-control', ...
vrouter-port-control : CommandFilter, vrouter-port-control, root
# nova/virt/libvirt/vif.py: 'ebrctl', ...
ebrctl : CommandFilter, ebrctl, root
# nova/virt/libvirt/vif.py: 'mm-ctl', ...
mm-ctl : CommandFilter, mm-ctl, root
# nova/network/linux_net.py: 'ovs-ofctl', ....
ovs-ofctl : CommandFilter, ovs-ofctl, root
# nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ...
dd : CommandFilter, dd, root
# nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
iscsiadm : CommandFilter, iscsiadm, root
# nova/virt/libvirt/volume/aoe.py: 'aoe-revalidate', aoedev
# nova/virt/libvirt/volume/aoe.py: 'aoe-discover'
aoe-revalidate : CommandFilter, aoe-revalidate, root
aoe-discover : CommandFilter, aoe-discover, root
# nova/virt/xenapi/vm_utils.py: parted, --script, ...
# nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*.
parted : CommandFilter, parted, root
# nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path
pygrub : CommandFilter, pygrub, root
# nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s
fdisk : CommandFilter, fdisk, root
# nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path
# nova/virt/disk/api.py: e2fsck, -f, -p, image
e2fsck : CommandFilter, e2fsck, root
# nova/virt/xenapi/vm_utils.py: resize2fs, partition_path
# nova/virt/disk/api.py: resize2fs, image
resize2fs : CommandFilter, resize2fs, root
# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
iptables-save : CommandFilter, iptables-save, root
ip6tables-save : CommandFilter, ip6tables-save, root
# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
iptables-restore : CommandFilter, iptables-restore, root
ip6tables-restore : CommandFilter, ip6tables-restore, root
# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
arping : CommandFilter, arping, root
# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
dhcp_release : CommandFilter, dhcp_release, root
# nova/network/linux_net.py: 'kill', '-9', pid
# nova/network/linux_net.py: 'kill', '-HUP', pid
kill_dnsmasq : KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
# nova/network/linux_net.py: 'kill', pid
kill_radvd : KillFilter, root, /usr/sbin/radvd
# nova/network/linux_net.py: dnsmasq call
dnsmasq : EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
# nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
radvd : CommandFilter, radvd, root
# nova/network/linux_net.py: 'brctl', 'addbr', bridge
# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
brctl : CommandFilter, brctl, root
# nova/virt/libvirt/utils.py: 'mkswap'
# nova/virt/xenapi/vm_utils.py: 'mkswap'
mkswap : CommandFilter, mkswap, root
# nova/virt/libvirt/utils.py: 'nova-idmapshift'
nova-idmapshift : CommandFilter, nova-idmapshift, root
# nova/virt/xenapi/vm_utils.py: 'mkfs'
# nova/utils.py: 'mkfs', fs, path, label
mkfs : CommandFilter, mkfs, root
# nova/virt/libvirt/utils.py: 'qemu-img'
qemu-img : CommandFilter, qemu-img, root
# nova/virt/disk/vfs/localfs.py: 'readlink', '-e'
readlink : CommandFilter, readlink, root
# nova/virt/disk/api.py:
mkfs.ext3 : CommandFilter, mkfs.ext3, root
mkfs.ext4 : CommandFilter, mkfs.ext4, root
mkfs.ntfs : CommandFilter, mkfs.ntfs, root
# nova/virt/libvirt/connection.py:
lvremove : CommandFilter, lvremove, root
# nova/virt/libvirt/utils.py:
lvcreate : CommandFilter, lvcreate, root
# nova/virt/libvirt/utils.py:
lvs : CommandFilter, lvs, root
# nova/virt/libvirt/utils.py:
vgs : CommandFilter, vgs, root
# nova/utils.py:read_file_as_root: 'cat', file_path
# (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file)
read_passwd : RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd
read_shadow : RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow
# os-brick needed commands
read_initiator : ReadFileFilter, /etc/iscsi/initiatorname.iscsi
multipath : CommandFilter, multipath, root
# multipathd show status
multipathd : CommandFilter, multipathd, root
systool : CommandFilter, systool, root
vgc-cluster : CommandFilter, vgc-cluster, root
# os_brick/initiator/connector.py
drv_cfg : CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid
# TODO(smcginnis) Temporary fix.
# Need to pull in os-brick os-brick.filters file instead and clean
# out stale brick values from this file.
scsi_id : CommandFilter, /lib/udev/scsi_id, root
# os_brick.privileged.default oslo.privsep context
# This line ties the superuser privs with the config files, context name,
# and (implicitly) the actual python code invoked.
privsep-rootwrap : RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*
# nova/storage/linuxscsi.py: sg_scan device
sg_scan : CommandFilter, sg_scan, root
# nova/volume/encryptors/cryptsetup.py:
# nova/volume/encryptors/luks.py:
ln : RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/crypt-.+, .+
# nova/volume/encryptors.py:
# nova/virt/libvirt/dmcrypt.py:
cryptsetup : CommandFilter, cryptsetup, root
# nova/virt/xenapi/vm_utils.py:
xenstore-read : CommandFilter, xenstore-read, root
# nova/virt/libvirt/utils.py:
rbd : CommandFilter, rbd, root
# nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path
shred : CommandFilter, shred, root
# nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control..
cp : CommandFilter, cp, root
# nova/virt/xenapi/vm_utils.py:
sync : CommandFilter, sync, root
# nova/virt/libvirt/imagebackend.py:
ploop : RegExpFilter, ploop, root, ploop, restore-descriptor, .*
prl_disk_tool : RegExpFilter, prl_disk_tool, root, prl_disk_tool, resize, --size, .*M$, --resize_partition, --hdd, .*
# nova/virt/libvirt/utils.py: 'xend', 'status'
xend : CommandFilter, xend, root
# nova/virt/libvirt/utils.py:
touch : CommandFilter, touch, root
# nova/virt/libvirt/volume/vzstorage.py
pstorage-mount : CommandFilter, pstorage-mount, root
2017-08-24 17:23:54 -05:00
network :
2018-08-01 13:49:06 -05:00
pods :
- compute
content : |
# nova-rootwrap command filters for network nodes
# This file should be owned by (and only-writeable by) the root user
[Filters]
# nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
# nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
# nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
# nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
# nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
# nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
# nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
# nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
# nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
# nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
# nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
# nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
# nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
# nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
# nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
# nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
# nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
# nova/network/linux_net.py: 'ip', 'route', 'add', ..
# nova/network/linux_net.py: 'ip', 'route', 'del', .
# nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
ip : CommandFilter, ip, root
# nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
# nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
# nova/network/linux_net.py: 'ovs-vsctl', ....
ovs-vsctl : CommandFilter, ovs-vsctl, root
# nova/network/linux_net.py: 'ovs-ofctl', ....
ovs-ofctl : CommandFilter, ovs-ofctl, root
# nova/virt/libvirt/vif.py: 'ivs-ctl', ...
# nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ...
# nova/network/linux_net.py: 'ivs-ctl', ....
ivs-ctl : CommandFilter, ivs-ctl, root
# nova/virt/libvirt/vif.py: 'ifc_ctl', ...
ifc_ctl : CommandFilter, /opt/pg/bin/ifc_ctl, root
# nova/network/linux_net.py: 'ebtables', '-D' ...
# nova/network/linux_net.py: 'ebtables', '-I' ...
ebtables : CommandFilter, ebtables, root
ebtables_usr : CommandFilter, ebtables, root
# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
iptables-save : CommandFilter, iptables-save, root
ip6tables-save : CommandFilter, ip6tables-save, root
# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
iptables-restore : CommandFilter, iptables-restore, root
ip6tables-restore : CommandFilter, ip6tables-restore, root
# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
arping : CommandFilter, arping, root
# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
dhcp_release : CommandFilter, dhcp_release, root
# nova/network/linux_net.py: 'kill', '-9', pid
# nova/network/linux_net.py: 'kill', '-HUP', pid
kill_dnsmasq : KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
# nova/network/linux_net.py: 'kill', pid
kill_radvd : KillFilter, root, /usr/sbin/radvd
# nova/network/linux_net.py: dnsmasq call
dnsmasq : EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
# nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
radvd : CommandFilter, radvd, root
# nova/network/linux_net.py: 'brctl', 'addbr', bridge
# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
brctl : CommandFilter, brctl, root
# nova/network/linux_net.py: 'sysctl', ....
sysctl : CommandFilter, sysctl, root
# nova/network/linux_net.py: 'conntrack'
conntrack : CommandFilter, conntrack, root
# nova/network/linux_net.py: 'fp-vdev'
fp-vdev : CommandFilter, fp-vdev, root
2018-01-29 23:20:06 -05:00
nova_ironic :
DEFAULT :
scheduler_host_manager : ironic_host_manager
compute_driver : ironic.IronicDriver
2021-04-20 14:26:02 +08:00
ram_allocation_ratio : 1.0
cpu_allocation_ratio : 1.0
reserved_host_memory_mb : 0
2018-06-22 14:16:25 +09:00
libvirt :
2023-09-25 15:34:55 -03:00
address_search_enabled : true
# When "address_search_enabled", get the IP address to be used as the target for live migration
# traffic using interface name.
2018-06-22 14:16:25 +09:00
# If this option is set to None, the hostname of the migration target compute node will be used.
2023-12-29 12:09:37 +08:00
live_migration_interface : null
# or set cidr
live_migration_network_cidr : 0/0
2018-12-21 09:37:30 +09:00
hypervisor :
2023-09-25 15:34:55 -03:00
address_search_enabled : true
2018-12-21 09:37:30 +09:00
# my_ip can be set automatically through this interface name.
2023-12-29 12:09:37 +08:00
host_interface : null
# If host_interface is null there is a fallback mechanism to search
# for interface with routing using host network cidr.
host_network_cidr : 0/0
2019-06-28 07:50:32 -05:00
# This list is the keys to exclude from the config file ingested by nova-compute
nova_compute_redactions :
- database
- api_database
- cell0_database
2017-04-10 12:41:41 -07:00
nova :
2017-09-19 14:28:46 -05:00
DEFAULT :
2018-06-17 16:12:25 -05:00
log_config_append : /etc/nova/logging.conf
2017-09-19 14:28:46 -05:00
default_ephemeral_format : ext4
ram_allocation_ratio : 1.0
disk_allocation_ratio : 1.0
cpu_allocation_ratio : 3.0
state_path : /var/lib/nova
osapi_compute_listen : 0.0.0.0
2018-04-11 21:04:19 +00:00
# NOTE(portdirect): the bind port should not be defined, and is manipulated
2018-03-03 15:07:37 -05:00
# via the endpoints section.
osapi_compute_listen_port : null
2017-09-19 14:28:46 -05:00
osapi_compute_workers : 1
metadata_workers : 1
compute_driver : libvirt.LibvirtDriver
my_ip : 0.0.0.0
2017-09-28 13:06:51 -05:00
instance_usage_audit : true
instance_usage_audit_period : hour
2017-11-28 21:03:53 +00:00
resume_guests_state_on_host_boot : true
2017-04-10 12:41:41 -07:00
vnc :
2023-07-10 23:13:35 -06:00
auth_schemes : none
2017-09-19 14:28:46 -05:00
novncproxy_host : 0.0.0.0
2023-03-24 14:37:01 -04:00
server_listen : 0.0.0.0
2018-05-08 17:45:14 +09:00
# This would be set by each compute node's ip
2021-05-26 18:35:57 +08:00
# server_proxyclient_address: 127.0.0.1
2018-01-16 20:57:03 +09:00
spice :
html5proxy_host : 0.0.0.0
server_listen : 0.0.0.0
2018-05-08 17:45:14 +09:00
# This would be set by each compute node's ip
# server_proxyclient_address: 127.0.0.1
2025-01-10 15:02:56 +09:00
serial_console :
serialproxy_host : 0.0.0.0
# This would be set by each compute node's ip
# proxyclient_address: 127.0.0.1
2017-04-10 12:41:41 -07:00
conductor :
2017-09-19 14:28:46 -05:00
workers : 1
2024-09-09 15:09:37 +08:00
scheduler :
max_attempts : 10
discover_hosts_in_cells_interval : -1
workers : 1
2017-04-10 12:41:41 -07:00
oslo_policy :
2017-09-19 14:28:46 -05:00
policy_file : /etc/nova/policy.yaml
2017-04-10 12:41:41 -07:00
oslo_concurrency :
2017-09-19 14:28:46 -05:00
lock_path : /var/lib/nova/tmp
2017-06-14 20:04:05 -05:00
oslo_middleware :
2017-09-19 14:28:46 -05:00
enable_proxy_headers_parsing : true
2017-04-10 12:41:41 -07:00
glance :
2017-09-19 14:28:46 -05:00
num_retries : 3
2018-01-29 23:20:06 -05:00
ironic :
api_endpoint : null
auth_url : null
2017-04-10 12:41:41 -07:00
neutron :
2017-09-19 14:28:46 -05:00
metadata_proxy_shared_secret : "password"
service_metadata_proxy : true
auth_type : password
auth_version : v3
2023-08-21 19:58:37 +10:00
cinder :
catalog_info : volumev3::internalURL
2017-04-10 12:41:41 -07:00
database :
2017-09-19 14:28:46 -05:00
max_retries : -1
2017-04-10 12:41:41 -07:00
api_database :
2017-09-19 14:28:46 -05:00
max_retries : -1
2017-11-01 10:58:44 +09:00
cell0_database :
max_retries : -1
2017-04-10 12:41:41 -07:00
keystone_authtoken :
2023-07-23 14:15:08 +03:30
service_token_roles : service
service_token_roles_required : true
2017-09-19 14:28:46 -05:00
auth_type : password
auth_version : v3
2020-02-21 17:12:38 +00:00
memcache_security_strategy : ENCRYPT
2023-05-12 23:00:39 +10:00
service_type : compute
2023-02-11 04:14:27 +00:00
notifications :
notify_on_state_change : vm_and_task_state
2019-06-25 20:20:41 -04:00
service_user :
auth_type : password
2023-07-23 14:15:08 +03:30
send_service_user_token : true
2017-04-10 12:41:41 -07:00
libvirt :
2020-09-15 23:34:15 +03:00
connection_uri : "qemu+unix:///system?socket=/run/libvirt/libvirt-sock"
2017-09-19 14:28:46 -05:00
images_type : qcow2
images_rbd_pool : vms
images_rbd_ceph_conf : /etc/ceph/ceph.conf
2018-05-01 12:34:59 -05:00
rbd_user : cinder
2017-09-19 14:28:46 -05:00
rbd_secret_uuid : 457eb676-33da-42ec-9a8c-9293d545c337
disk_cachemodes : "network=writeback"
hw_disk_discard : unmap
2017-04-10 12:41:41 -07:00
upgrade_levels :
2017-09-19 14:28:46 -05:00
compute : auto
2017-04-10 12:41:41 -07:00
cache :
2017-09-19 14:28:46 -05:00
enabled : true
2019-03-20 11:45:33 +01:00
backend : dogpile.cache.memcached
2017-04-10 12:41:41 -07:00
wsgi :
2017-09-19 14:28:46 -05:00
api_paste_config : /etc/nova/api-paste.ini
2017-09-28 13:06:51 -05:00
oslo_messaging_notifications :
driver : messagingv2
2019-03-29 15:14:04 -05:00
oslo_messaging_rabbit :
rabbit_ha_queues : true
2017-08-14 13:52:15 +09:00
placement :
auth_type : password
auth_version : v3
2018-06-17 16:12:25 -05:00
logging :
loggers :
keys :
- root
- nova
2020-01-27 21:12:59 +00:00
- os.brick
2018-06-17 16:12:25 -05:00
handlers :
keys :
- stdout
- stderr
- "null"
formatters :
keys :
- context
- default
logger_root :
level : WARNING
2020-09-14 23:36:33 +03:00
handlers : 'null'
2018-06-17 16:12:25 -05:00
logger_nova :
level : INFO
handlers :
- stdout
qualname : nova
2020-01-27 21:12:59 +00:00
logger_os.brick :
level : INFO
handlers :
- stdout
qualname : os.brick
2018-06-17 16:12:25 -05:00
logger_amqp :
level : WARNING
handlers : stderr
qualname : amqp
logger_amqplib :
level : WARNING
handlers : stderr
qualname : amqplib
logger_eventletwsgi :
level : WARNING
handlers : stderr
qualname : eventlet.wsgi.server
logger_sqlalchemy :
level : WARNING
handlers : stderr
qualname : sqlalchemy
logger_boto :
level : WARNING
handlers : stderr
qualname : boto
handler_null :
class : logging.NullHandler
formatter : default
args : ()
handler_stdout :
class : StreamHandler
args : (sys.stdout,)
formatter : context
handler_stderr :
class : StreamHandler
args : (sys.stderr,)
formatter : context
formatter_context :
class : oslo_log.formatters.ContextFormatter
2019-02-19 17:42:11 +02:00
datefmt : "%Y-%m-%d %H:%M:%S"
2018-06-17 16:12:25 -05:00
formatter_default :
format : "%(message)s"
2019-02-19 17:42:11 +02:00
datefmt : "%Y-%m-%d %H:%M:%S"
2018-07-04 20:41:02 -07:00
rabbitmq :
2020-05-21 10:09:37 -05:00
# NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones
2018-07-04 20:41:02 -07:00
policies :
- vhost : "nova"
name : "ha_ttl_nova"
definition :
2020-05-21 10:09:37 -05:00
# mirror messages to other nodes in rmq cluster
2018-07-04 20:41:02 -07:00
ha-mode : "all"
ha-sync-mode : "automatic"
2020-05-21 10:09:37 -05:00
# 70s
2018-07-04 20:41:02 -07:00
message-ttl : 70000
priority : 0
apply-to : all
2019-07-30 09:54:16 -05:00
pattern : '^(?!(amq\.|reply_)).*'
2020-01-27 21:12:59 +00:00
enable_iscsi : false
2021-06-16 23:15:17 +05:45
archive_deleted_rows :
purge_deleted_rows : false
until_completion : true
all_cells : false
max_rows :
enabled : false
rows : 1000
before :
enabled : false
date : 'nil'
2023-11-01 14:23:37 +08:00
nova_api_uwsgi :
uwsgi :
add-header : "Connection: close"
buffer-size : 65535
die-on-term : true
enable-threads : true
exit-on-reload : false
hook-master-start : unix_signal:15 gracefully_kill_them_all
lazy-apps : true
log-x-forwarded-for : true
master : true
procname-prefix-spaced : "nova-api:"
route-user-agent : '^kube-probe.* donotlog:'
thunder-lock : true
worker-reload-mercy : 80
wsgi-file : /var/lib/openstack/bin/nova-api-wsgi
nova_metadata_uwsgi :
uwsgi :
add-header : "Connection: close"
buffer-size : 65535
die-on-term : true
enable-threads : true
exit-on-reload : false
hook-master-start : unix_signal:15 gracefully_kill_them_all
lazy-apps : true
log-x-forwarded-for : true
master : true
procname-prefix-spaced : "nova-metadata:"
route-user-agent : '^kube-probe.* donotlog:'
thunder-lock : true
worker-reload-mercy : 80
wsgi-file : /var/lib/openstack/bin/nova-metadata-wsgi
2017-04-10 12:41:41 -07:00
2017-07-02 19:43:43 -05:00
# Names of secrets used by bootstrap and environmental checks
secrets :
identity :
admin : nova-keystone-admin
2018-01-14 19:20:34 -05:00
nova : nova-keystone-user
2018-01-27 11:12:30 -05:00
test : nova-keystone-test
2017-07-02 19:43:43 -05:00
oslo_db :
admin : nova-db-admin
2018-01-14 19:20:34 -05:00
nova : nova-db-user
2017-07-02 19:43:43 -05:00
oslo_db_api :
admin : nova-db-api-admin
2018-01-14 19:20:34 -05:00
nova : nova-db-api-user
2017-11-01 10:58:44 +09:00
oslo_db_cell0 :
2020-08-13 20:55:40 +00:00
admin : nova-db-cell0-admin
nova : nova-db-cell0-user
2018-02-03 21:16:34 -08:00
oslo_messaging :
admin : nova-rabbitmq-admin
nova : nova-rabbitmq-user
2018-06-18 16:16:06 -05:00
tls :
compute :
osapi :
public : nova-tls-public
2020-06-03 12:40:40 -05:00
internal : nova-tls-api
2018-06-18 16:16:06 -05:00
compute_novnc_proxy :
novncproxy :
public : nova-novncproxy-tls-public
2020-06-03 12:40:40 -05:00
internal : nova-novncproxy-tls-proxy
2023-07-10 23:13:35 -06:00
vencrypt :
internal : nova-novncproxy-vencrypt
2020-06-03 12:40:40 -05:00
compute_metadata :
metadata :
public : metadata-tls-public
internal : metadata-tls-metadata
compute_spice_proxy :
spiceproxy :
2024-01-11 10:35:14 +08:00
public : nova-spiceproxy-tls-public
internal : nova-spiceproxy-tls-proxy
2025-01-10 15:02:56 +09:00
compute_serial_proxy :
serialproxy :
public : nova-serialproxy-tls-public
internal : nova-serialproxy-tls-proxy
2022-08-02 14:19:13 -04:00
oci_image_registry :
nova : nova-oci-image-registry
2017-04-10 12:41:41 -07:00
2018-03-23 05:30:25 +08:00
# typically overridden by environmental
2017-01-04 13:19:04 -08:00
# values, but should include all endpoints
# required by this chart
endpoints :
2017-08-30 23:31:16 -05:00
cluster_domain_suffix : cluster.local
2018-05-10 17:09:18 -05:00
local_image_registry :
name : docker-registry
namespace : docker-registry
hosts :
default : localhost
internal : docker-registry
node : localhost
host_fqdn_override :
default : null
port :
registry :
node : 5000
2022-08-02 14:19:13 -04:00
oci_image_registry :
name : oci-image-registry
namespace : oci-image-registry
auth :
enabled : false
nova :
username : nova
password : password
hosts :
default : localhost
host_fqdn_override :
default : null
port :
registry :
default : null
2017-04-10 12:41:41 -07:00
oslo_db :
auth :
admin :
username : root
password : password
2020-07-01 00:25:27 +00:00
secret :
tls :
internal : mariadb-tls-direct
2018-01-14 19:20:34 -05:00
nova :
2017-04-10 12:41:41 -07:00
username : nova
password : password
hosts :
default : mariadb
2017-09-06 10:13:00 -05:00
host_fqdn_override :
2017-08-30 23:31:16 -05:00
default : null
2017-04-10 12:41:41 -07:00
path : /nova
scheme : mysql+pymysql
port :
2017-06-14 20:04:05 -05:00
mysql :
default : 3306
2017-04-10 12:41:41 -07:00
oslo_db_api :
auth :
admin :
username : root
password : password
2018-01-14 19:20:34 -05:00
nova :
2017-04-10 12:41:41 -07:00
username : nova
password : password
hosts :
default : mariadb
2017-09-06 10:13:00 -05:00
host_fqdn_override :
2017-08-30 23:31:16 -05:00
default : null
2017-04-10 12:41:41 -07:00
path : /nova_api
scheme : mysql+pymysql
port :
2017-06-14 20:04:05 -05:00
mysql :
default : 3306
2017-11-01 10:58:44 +09:00
oslo_db_cell0 :
auth :
admin :
username : root
password : password
2018-01-14 19:20:34 -05:00
nova :
2017-11-01 10:58:44 +09:00
username : nova
password : password
hosts :
default : mariadb
host_fqdn_override :
default : null
2018-02-13 12:42:32 -05:00
path : /nova_cell0
2017-11-01 10:58:44 +09:00
scheme : mysql+pymysql
port :
mysql :
default : 3306
2017-04-10 12:41:41 -07:00
oslo_messaging :
auth :
2018-02-03 21:16:34 -08:00
admin :
2017-04-10 12:41:41 -07:00
username : rabbitmq
password : password
2021-01-28 00:10:36 -06:00
secret :
tls :
internal : rabbitmq-tls-direct
2018-02-03 21:16:34 -08:00
nova :
username : nova
password : password
2019-06-17 08:56:59 -05:00
statefulset :
replicas : 2
name : rabbitmq-rabbitmq
2017-04-10 12:41:41 -07:00
hosts :
default : rabbitmq
2017-09-06 10:13:00 -05:00
host_fqdn_override :
2017-08-30 23:31:16 -05:00
default : null
2018-04-17 09:35:14 -05:00
path : /nova
2017-04-10 12:41:41 -07:00
scheme : rabbit
port :
2017-06-14 20:04:05 -05:00
amqp :
default : 5672
2018-02-03 21:16:34 -08:00
http :
default : 15672
2017-04-10 12:41:41 -07:00
oslo_cache :
2020-02-21 17:12:38 +00:00
auth :
# NOTE(portdirect): this is used to define the value for keystone
# authtoken cache encryption key, if not set it will be populated
# automatically with a random value, but to take advantage of
# this feature all services should be set to use the same key,
# and memcache service.
memcache_secret_key : null
2017-04-10 12:41:41 -07:00
hosts :
2017-06-20 00:22:49 -05:00
default : memcached
2017-09-06 10:13:00 -05:00
host_fqdn_override :
2017-08-30 23:31:16 -05:00
default : null
2017-04-10 12:41:41 -07:00
port :
2017-06-14 20:04:05 -05:00
memcache :
default : 11211
2017-03-07 00:39:42 +00:00
identity :
name : keystone
2017-07-02 19:43:43 -05:00
auth :
admin :
region_name : RegionOne
username : admin
password : password
project_name : admin
user_domain_name : default
project_domain_name : default
2018-01-14 19:20:34 -05:00
nova :
2023-07-23 14:15:08 +03:30
role : admin,service
2017-07-02 19:43:43 -05:00
region_name : RegionOne
username : nova
password : password
project_name : service
2018-07-25 21:22:23 -05:00
user_domain_name : service
project_domain_name : service
2018-04-11 21:04:19 +00:00
# NOTE(portdirect): the neutron user is not managed by the nova chart
2018-01-14 19:20:34 -05:00
# these values should match those set in the neutron chart.
2017-09-26 07:26:51 -05:00
neutron :
region_name : RegionOne
project_name : service
2018-07-25 21:22:23 -05:00
user_domain_name : service
project_domain_name : service
2017-09-26 07:26:51 -05:00
username : neutron
password : password
2018-04-11 21:04:19 +00:00
# NOTE(portdirect): the ironic user is not managed by the nova chart
2018-01-29 23:20:06 -05:00
# these values should match those set in the ironic chart.
ironic :
auth_type : password
auth_version : v3
region_name : RegionOne
project_name : service
2018-07-25 21:22:23 -05:00
user_domain_name : service
project_domain_name : service
2018-01-29 23:20:06 -05:00
username : ironic
password : password
2017-08-14 13:52:15 +09:00
placement :
role : admin
region_name : RegionOne
username : placement
password : password
project_name : service
2018-07-25 21:22:23 -05:00
user_domain_name : service
project_domain_name : service
2023-08-21 19:58:37 +10:00
cinder :
role : admin,service
region_name : RegionOne
username : cinder
password : password
project_name : service
user_domain_name : service
project_domain_name : service
2018-01-27 11:12:30 -05:00
test :
role : admin
region_name : RegionOne
2019-06-03 11:02:29 -05:00
username : nova-test
2018-01-27 11:12:30 -05:00
password : password
project_name : test
2018-07-25 21:22:23 -05:00
user_domain_name : service
project_domain_name : service
2017-01-04 13:19:04 -08:00
hosts :
2018-08-23 11:33:21 -05:00
default : keystone
internal : keystone-api
2017-09-06 10:13:00 -05:00
host_fqdn_override :
2017-08-30 23:31:16 -05:00
default : null
2017-06-14 20:04:05 -05:00
path :
default : /v3
scheme :
default : http
2017-01-04 13:19:04 -08:00
port :
2017-06-14 20:04:05 -05:00
api :
default : 80
2018-08-03 14:49:53 -05:00
internal : 5000
2017-03-07 00:39:42 +00:00
image :
name : glance
hosts :
default : glance-api
2017-06-14 20:04:05 -05:00
public : glance
2017-09-06 10:13:00 -05:00
host_fqdn_override :
2017-08-30 23:31:16 -05:00
default : null
2017-06-14 20:04:05 -05:00
path :
default : null
scheme :
default : http
port :
api :
default : 9292
public : 80
compute :
name : nova
hosts :
default : nova-api
public : nova
2017-09-06 10:13:00 -05:00
host_fqdn_override :
2017-08-30 23:31:16 -05:00
default : null
2018-06-18 16:16:06 -05:00
# NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
# endpoints using the following format:
# public:
# host: null
# tls:
# crt: null
# key: null
2017-06-14 20:04:05 -05:00
path :
2018-03-22 16:30:32 -05:00
default : "/v2.1/%(tenant_id)s"
2017-06-14 20:04:05 -05:00
scheme :
default : 'http'
2021-12-02 17:29:33 +02:00
service : 'http'
2017-06-14 20:04:05 -05:00
port :
api :
default : 8774
public : 80
2021-12-02 17:29:33 +02:00
service : 8774
2017-06-14 20:04:05 -05:00
novncproxy :
default : 6080
compute_metadata :
name : nova
2018-03-10 18:13:22 +00:00
ip :
      # If blank, set clusterIP and metadata_host dynamically
ingress : null
2017-06-14 20:04:05 -05:00
hosts :
default : nova-metadata
public : metadata
2017-09-06 10:13:00 -05:00
host_fqdn_override :
2017-08-30 23:31:16 -05:00
default : null
2017-06-14 20:04:05 -05:00
path :
default : /
scheme :
default : 'http'
2017-03-07 00:39:42 +00:00
port :
2017-06-14 20:04:05 -05:00
metadata :
default : 8775
public : 80
2017-10-13 00:24:14 -05:00
compute_novnc_proxy :
name : nova
hosts :
default : nova-novncproxy
2018-04-13 15:42:23 -05:00
public : novncproxy
2017-10-13 00:24:14 -05:00
host_fqdn_override :
default : null
2018-06-18 16:16:06 -05:00
# NOTE(portdirect): this chart supports TLS for fqdn over-ridden public
# endpoints using the following format:
# public:
# host: null
# tls:
# crt: null
# key: null
2017-10-13 00:24:14 -05:00
path :
default : /vnc_auto.html
scheme :
default : 'http'
port :
novnc_proxy :
default : 6080
2018-04-13 15:42:23 -05:00
public : 80
2023-07-10 23:13:35 -06:00
# This endpoint is only to allow configuring the cert used specifically for
# vencrypt. Specifically, the same CA/issuer needs to be used to sign both
# this cert, and the libvirt/qemu certs.
compute_novnc_vencrypt :
hosts :
default : nova-novncproxy
host_fqdn_override :
default :
commonName : nova-novncproxy
usages :
- client auth
2025-01-10 15:02:56 +09:00
compute_serial_proxy :
name : nova
hosts :
default : nova-serialproxy
public : serialproxy
host_fqdn_override :
default : null
scheme :
default : 'ws'
path :
default : /serial_auto.html
port :
serial_proxy :
default : 6083
public : 80
2018-01-16 20:57:03 +09:00
compute_spice_proxy :
name : nova
hosts :
default : nova-spiceproxy
2024-01-11 10:35:14 +08:00
public : spiceproxy
2018-01-16 20:57:03 +09:00
host_fqdn_override :
default : null
path :
default : /spice_auto.html
scheme :
default : 'http'
port :
spice_proxy :
default : 6082
2024-01-11 10:35:14 +08:00
public : 80
2017-08-14 13:52:15 +09:00
placement :
name : placement
hosts :
default : placement-api
public : placement
host_fqdn_override :
default : null
path :
default : /
scheme :
default : 'http'
2021-12-02 17:29:33 +02:00
service : 'http'
2017-08-14 13:52:15 +09:00
port :
api :
default : 8778
public : 80
2021-12-02 17:29:33 +02:00
service : 8778
2017-03-07 00:39:42 +00:00
network :
name : neutron
2017-01-12 14:51:36 -08:00
hosts :
default : neutron-server
2017-06-14 20:04:05 -05:00
public : neutron
2017-09-06 10:13:00 -05:00
host_fqdn_override :
2017-08-30 23:31:16 -05:00
default : null
2017-06-14 20:04:05 -05:00
path :
default : null
scheme :
default : 'http'
2017-01-12 14:51:36 -08:00
port :
2017-06-14 20:04:05 -05:00
api :
default : 9696
public : 80
2018-01-29 23:20:06 -05:00
baremetal :
name : ironic
hosts :
default : ironic-api
public : ironic
host_fqdn_override :
default : null
path :
default : null
scheme :
default : http
port :
api :
default : 6385
public : 80
2018-06-17 16:12:25 -05:00
fluentd :
namespace : null
name : fluentd
hosts :
default : fluentd-logging
host_fqdn_override :
default : null
path :
default : null
scheme : 'http'
port :
service :
default : 24224
metrics :
default : 24220
2019-09-29 20:36:36 -05:00
# NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress
2018-10-31 11:00:48 -04:00
  # They are used to enable the Egress K8s network policy.
2019-09-29 20:36:36 -05:00
kube_dns :
2018-10-31 11:00:48 -04:00
namespace : kube-system
2019-09-29 20:36:36 -05:00
name : kubernetes-dns
hosts :
default : kube-dns
host_fqdn_override :
default : null
path :
default : null
scheme : http
port :
dns :
default : 53
protocol : UDP
ingress :
namespace : null
name : ingress
hosts :
default : ingress
port :
ingress :
default : 80
2017-01-31 02:22:51 +02:00
2017-07-11 13:02:23 -05:00
pod :
2020-02-10 15:57:14 +02:00
probes :
rpc_timeout : 60
rpc_retries : 2
compute :
default :
liveness :
enabled : True
params :
periodSeconds : 90
timeoutSeconds : 70
readiness :
enabled : True
params :
periodSeconds : 90
timeoutSeconds : 70
2023-12-22 20:35:49 +11:00
startup :
enabled : True
params :
failureThreshold : 120
periodSeconds : 10
successThreshold : 1
timeoutSeconds : 70
2020-02-10 15:57:14 +02:00
api-metadata :
default :
liveness :
enabled : True
params :
2023-11-01 14:23:37 +08:00
initialDelaySeconds : 5
periodSeconds : 10
timeoutSeconds : 5
2020-02-10 15:57:14 +02:00
readiness :
enabled : True
params :
2023-11-01 14:23:37 +08:00
initialDelaySeconds : 5
periodSeconds : 10
timeoutSeconds : 5
2020-02-10 15:57:14 +02:00
api-osapi :
default :
liveness :
enabled : True
params :
2023-11-01 14:23:37 +08:00
initialDelaySeconds : 5
periodSeconds : 10
timeoutSeconds : 5
2020-02-10 15:57:14 +02:00
readiness :
enabled : True
params :
2023-11-01 14:23:37 +08:00
initialDelaySeconds : 5
periodSeconds : 10
timeoutSeconds : 5
2020-02-10 15:57:14 +02:00
conductor :
default :
liveness :
enabled : True
params :
initialDelaySeconds : 120
periodSeconds : 90
timeoutSeconds : 70
readiness :
enabled : True
params :
initialDelaySeconds : 80
periodSeconds : 90
timeoutSeconds : 70
novncproxy :
default :
liveness :
enabled : True
params :
initialDelaySeconds : 30
2021-06-18 19:12:15 -05:00
periodSeconds : 60
timeoutSeconds : 15
2020-02-10 15:57:14 +02:00
readiness :
enabled : True
params :
initialDelaySeconds : 30
2021-06-18 19:12:15 -05:00
periodSeconds : 60
timeoutSeconds : 15
2020-02-10 15:57:14 +02:00
scheduler :
default :
liveness :
enabled : True
params :
initialDelaySeconds : 120
periodSeconds : 90
timeoutSeconds : 70
readiness :
enabled : True
params :
initialDelaySeconds : 80
periodSeconds : 90
timeoutSeconds : 70
2025-01-10 15:02:56 +09:00
serialproxy :
default :
liveness :
enabled : True
params :
initialDelaySeconds : 30
periodSeconds : 60
timeoutSeconds : 15
readiness :
enabled : True
params :
initialDelaySeconds : 30
periodSeconds : 60
timeoutSeconds : 15
2020-02-10 15:57:14 +02:00
compute-spice-proxy :
default :
liveness :
enabled : True
params :
initialDelaySeconds : 30
2021-06-18 19:12:15 -05:00
periodSeconds : 60
timeoutSeconds : 15
2020-02-10 15:57:14 +02:00
readiness :
enabled : True
params :
initialDelaySeconds : 30
2021-06-18 19:12:15 -05:00
periodSeconds : 60
timeoutSeconds : 15
2019-04-22 14:06:12 -05:00
security_context :
nova :
pod :
runAsUser : 42424
container :
nova_compute_init :
readOnlyRootFilesystem : true
runAsUser : 0
2020-07-08 12:42:01 -05:00
tungstenfabric_compute_init :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
2019-04-22 14:06:12 -05:00
ceph_perms :
readOnlyRootFilesystem : true
runAsUser : 0
nova_compute_vnc_init :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
2025-01-10 15:02:56 +09:00
nova_compute_serial_init :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
2019-04-22 14:06:12 -05:00
nova_compute_spice_init :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
nova_compute :
readOnlyRootFilesystem : true
privileged : true
nova_compute_ssh :
privileged : true
2022-01-07 15:59:41 -03:00
runAsUser : 0
nova_compute_ssh_init :
runAsUser : 0
2019-04-22 14:06:12 -05:00
nova_api_metadata_init :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
nova_api :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
nova_osapi :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
nova_conductor :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
nova_novncproxy_init :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
nova_novncproxy_init_assests :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
nova_novncproxy :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
nova_scheduler :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
2025-01-10 15:02:56 +09:00
nova_serialproxy_init :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
nova_serialproxy :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
2019-04-22 14:06:12 -05:00
nova_spiceproxy_init :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
nova_spiceproxy_init_assets :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
nova_spiceproxy :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
2020-07-02 10:19:16 -05:00
bootstrap :
pod :
runAsUser : 42424
container :
2020-08-05 13:55:03 -05:00
nova_wait_for_computes_init :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
2020-07-02 10:19:16 -05:00
bootstrap :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
2020-08-05 13:55:03 -05:00
nova_cell_setup :
pod :
runAsUser : 42424
container :
2021-10-27 10:52:39 -05:00
nova_wait_for_computes_init :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
2020-08-05 13:55:03 -05:00
nova_cell_setup_init :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
nova_cell_setup :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
2021-06-16 23:15:17 +05:45
archive_deleted_rows :
pod :
runAsUser : 42424
container :
nova_archive_deleted_rows_init :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
nova_archive_deleted_rows :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
2020-07-02 10:19:16 -05:00
cell_setup :
pod :
runAsUser : 42424
container :
nova_cell_setup :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
service_cleaner :
pod :
runAsUser : 42424
container :
nova_service_cleaner :
readOnlyRootFilesystem : true
allowPrivilegeEscalation : false
2019-12-13 16:16:22 -06:00
use_fqdn :
    # NOTE: If the option "host" is not specified in nova.conf, the host name
    # shown in the hypervisor host defaults to the short name of the host.
    # Setting this option to true will cause $(hostname --fqdn) to be used as
    # the host name by default. If the short name $(hostname --short) is
    # desired, set the option to false. Specifying a host in nova.conf via the
    # conf: section will supersede the value of this option.
compute : true
2017-07-27 16:15:46 -05:00
affinity :
2018-04-11 21:04:19 +00:00
anti :
type :
default : preferredDuringSchedulingIgnoredDuringExecution
topologyKey :
default : kubernetes.io/hostname
2019-05-10 22:05:24 -05:00
weight :
default : 10
2022-03-22 15:30:13 -03:00
tolerations :
nova :
enabled : false
tolerations :
- key : node-role.kubernetes.io/master
operator : Exists
effect : NoSchedule
2023-03-20 13:51:59 +08:00
- key : node-role.kubernetes.io/control-plane
operator : Exists
effect : NoSchedule
2017-07-14 10:44:35 -05:00
mounts :
nova_compute :
init_container : null
nova_compute :
2019-03-13 14:02:06 +01:00
volumeMounts :
volumes :
2018-01-29 23:20:06 -05:00
nova_compute_ironic :
init_container : null
nova_compute_ironic :
2019-03-13 14:02:06 +01:00
volumeMounts :
volumes :
2017-07-14 10:44:35 -05:00
nova_api_metadata :
init_container : null
nova_api_metadata :
2019-03-13 14:02:06 +01:00
volumeMounts :
volumes :
2017-07-14 10:44:35 -05:00
nova_api_osapi :
init_container : null
nova_api_osapi :
2019-03-13 14:02:06 +01:00
volumeMounts :
volumes :
2017-07-14 10:44:35 -05:00
nova_conductor :
init_container : null
nova_conductor :
2019-03-13 14:02:06 +01:00
volumeMounts :
volumes :
2017-07-14 10:44:35 -05:00
nova_scheduler :
init_container : null
nova_scheduler :
2019-03-13 14:02:06 +01:00
volumeMounts :
volumes :
2017-07-14 10:44:35 -05:00
nova_bootstrap :
init_container : null
nova_bootstrap :
2019-03-13 14:02:06 +01:00
volumeMounts :
volumes :
2017-07-14 10:44:35 -05:00
nova_tests :
init_container : null
nova_tests :
2019-03-13 14:02:06 +01:00
volumeMounts :
volumes :
2017-07-14 10:44:35 -05:00
nova_novncproxy :
init_novncproxy : null
nova_novncproxy :
2019-03-13 14:02:06 +01:00
volumeMounts :
volumes :
2025-01-10 15:02:56 +09:00
nova_serialproxy :
init_serialproxy : null
nova_serialproxy :
volumeMounts :
volumes :
2018-01-16 20:57:03 +09:00
nova_spiceproxy :
init_spiceproxy : null
nova_spiceproxy :
2019-03-13 14:02:06 +01:00
volumeMounts :
volumes :
2019-05-22 14:29:38 +09:00
nova_db_sync :
nova_db_sync :
volumeMounts :
volumes :
2019-09-04 19:36:31 +08:00
useHostNetwork :
novncproxy : true
2017-07-13 22:44:26 -05:00
replicas :
api_metadata : 1
2018-01-29 23:20:06 -05:00
compute_ironic : 1
2017-07-13 22:44:26 -05:00
osapi : 1
conductor : 1
scheduler : 1
novncproxy : 1
2025-01-10 15:02:56 +09:00
serialproxy : 1
2018-01-16 20:57:03 +09:00
spiceproxy : 1
2017-07-11 13:02:23 -05:00
lifecycle :
upgrades :
deployments :
revision_history : 3
pod_replacement_strategy : RollingUpdate
rolling_update :
max_unavailable : 1
max_surge : 3
daemonsets :
pod_replacement_strategy : RollingUpdate
compute :
enabled : true
min_ready_seconds : 0
max_unavailable : 1
disruption_budget :
metadata :
min_available : 0
osapi :
min_available : 0
termination_grace_period :
metadata :
timeout : 30
osapi :
timeout : 30
resources :
enabled : false
compute :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2018-01-29 23:20:06 -05:00
compute_ironic :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2017-07-11 13:02:23 -05:00
api_metadata :
2017-07-05 02:34:12 -05:00
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2017-07-11 13:02:23 -05:00
api :
2017-07-05 02:34:12 -05:00
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2017-07-11 13:02:23 -05:00
conductor :
2017-07-05 02:34:12 -05:00
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2017-07-11 13:02:23 -05:00
scheduler :
2017-07-05 02:34:12 -05:00
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2017-09-08 09:14:38 -05:00
ssh :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2017-07-11 13:02:23 -05:00
novncproxy :
2017-07-05 02:34:12 -05:00
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2025-01-10 15:02:56 +09:00
serialproxy :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2018-01-16 20:57:03 +09:00
spiceproxy :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2017-07-11 13:02:23 -05:00
jobs :
bootstrap :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2023-12-29 14:22:08 +08:00
storage_init :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2017-07-11 13:02:23 -05:00
db_init :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2018-02-03 21:16:34 -08:00
rabbit_init :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2017-07-11 13:02:23 -05:00
db_sync :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2021-06-16 23:15:17 +05:45
archive_deleted_rows :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2017-09-25 13:52:44 -06:00
db_drop :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2017-07-11 13:02:23 -05:00
ks_endpoints :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
ks_service :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
ks_user :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
tests :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2017-11-01 10:58:44 +09:00
cell_setup :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2018-07-13 13:56:28 -05:00
service_cleaner :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2018-05-10 17:09:18 -05:00
image_repo_sync :
requests :
memory : "128Mi"
cpu : "100m"
limits :
memory : "1024Mi"
cpu : "2000m"
2017-11-01 10:58:44 +09:00
2018-09-25 09:16:33 -05:00
network_policy :
nova :
# TODO(lamt): Need to tighten this ingress for security.
ingress :
- {}
2018-10-31 11:00:48 -04:00
egress :
- {}
2018-09-25 09:16:33 -05:00
2021-03-17 19:01:27 +05:45
# NOTE(helm_hook): helm_hook might break for helm2 binary.
# set helm3_hook: false when using the helm2 binary.
helm3_hook : true
2022-02-17 22:23:52 -08:00
health_probe :
logging :
level : ERROR
2021-12-02 19:33:39 +02:00
tls :
identity : false
oslo_messaging : false
oslo_db : false
2017-08-07 11:37:42 -05:00
manifests :
2020-06-03 12:40:40 -05:00
certificates : false
2017-08-07 11:37:42 -05:00
configmap_bin : true
configmap_etc : true
2018-05-01 02:13:36 -05:00
cron_job_cell_setup : true
2018-07-13 13:56:28 -05:00
cron_job_service_cleaner : true
2021-06-16 23:15:17 +05:45
cron_job_archive_deleted_rows : false
2017-08-07 11:37:42 -05:00
daemonset_compute : true
deployment_api_metadata : true
deployment_api_osapi : true
deployment_conductor : true
deployment_novncproxy : true
2025-01-10 15:02:56 +09:00
deployment_serialproxy : true
2018-01-16 20:57:03 +09:00
deployment_spiceproxy : true
2017-08-07 11:37:42 -05:00
deployment_scheduler : true
ingress_metadata : true
2018-04-13 15:42:23 -05:00
ingress_novncproxy : true
2025-01-10 15:02:56 +09:00
ingress_serialproxy : true
2024-01-11 10:35:14 +08:00
ingress_spiceproxy : true
2017-08-07 11:37:42 -05:00
ingress_osapi : true
job_bootstrap : true
2023-12-29 14:22:08 +08:00
job_storage_init : true
2017-08-07 11:37:42 -05:00
job_db_init : true
job_db_sync : true
2017-09-25 13:52:44 -06:00
job_db_drop : false
2018-05-10 17:09:18 -05:00
job_image_repo_sync : true
2018-02-03 21:16:34 -08:00
job_rabbit_init : true
2017-08-07 11:37:42 -05:00
job_ks_endpoints : true
job_ks_service : true
job_ks_user : true
2018-05-07 10:59:04 -05:00
job_cell_setup : true
2017-08-07 11:37:42 -05:00
pdb_metadata : true
pdb_osapi : true
pod_rally_test : true
2018-09-25 09:16:33 -05:00
network_policy : false
2017-08-07 11:37:42 -05:00
secret_db_api : true
2020-08-13 20:55:40 +00:00
secret_db_cell0 : true
2017-08-07 11:37:42 -05:00
secret_db : true
2018-06-18 16:16:06 -05:00
secret_ingress_tls : true
2017-08-07 11:37:42 -05:00
secret_keystone : true
2018-02-03 21:16:34 -08:00
secret_rabbitmq : true
2022-08-02 14:19:13 -04:00
secret_registry : true
2017-08-07 11:37:42 -05:00
service_ingress_metadata : true
2018-04-13 15:42:23 -05:00
service_ingress_novncproxy : true
2025-01-10 15:02:56 +09:00
service_ingress_serialproxy : true
2024-01-11 10:35:14 +08:00
service_ingress_spiceproxy : true
2017-08-07 11:37:42 -05:00
service_ingress_osapi : true
service_metadata : true
service_novncproxy : true
2025-01-10 15:02:56 +09:00
service_serialproxy : true
2018-01-16 20:57:03 +09:00
service_spiceproxy : true
2017-08-07 11:37:42 -05:00
service_osapi : true
2018-01-29 23:20:06 -05:00
statefulset_compute_ironic : false
2020-05-21 10:09:37 -05:00
...