commit 1eeb58d2bc6cf363f4fa63448dfc1e549636ff8e Author: Liam Young Date: Thu Jan 13 13:38:15 2022 +0000 Initial Cut diff --git a/charms/nova-k8s/.flake8 b/charms/nova-k8s/.flake8 new file mode 100644 index 00000000..8ef84fcd --- /dev/null +++ b/charms/nova-k8s/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/charms/nova-k8s/.gitignore b/charms/nova-k8s/.gitignore new file mode 100644 index 00000000..de9170b0 --- /dev/null +++ b/charms/nova-k8s/.gitignore @@ -0,0 +1,10 @@ +venv/ +build/ +*.charm +*.swp + +.coverage +__pycache__/ +*.py[cod] +.tox +.stestr/ diff --git a/charms/nova-k8s/.jujuignore b/charms/nova-k8s/.jujuignore new file mode 100644 index 00000000..6ccd559e --- /dev/null +++ b/charms/nova-k8s/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/charms/nova-k8s/.stestr.conf b/charms/nova-k8s/.stestr.conf new file mode 100644 index 00000000..5fcccaca --- /dev/null +++ b/charms/nova-k8s/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/charms/nova-k8s/CONTRIBUTING.md b/charms/nova-k8s/CONTRIBUTING.md new file mode 100644 index 00000000..35019ea5 --- /dev/null +++ b/charms/nova-k8s/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# sunbeam-nova-operator + +## Developing + +Create and activate a virtualenv with the development requirements: + + virtualenv -p python3 venv + source venv/bin/activate + pip install -r requirements-dev.txt + +## Code overview + +TEMPLATE-TODO: +One of the most important things a consumer of your charm (or library) +needs to know is what set of functionality it provides. Which categories +does it fit into? Which events do you listen to? Which libraries do you +consume? Which ones do you export and how are they used? + +## Intended use case + +TEMPLATE-TODO: +Why were these decisions made? What's the scope of your charm? + +## Roadmap + +If this Charm doesn't fulfill all of the initial functionality you were +hoping for or planning on, please add a Roadmap or TODO here + +## Testing + +The Python operator framework includes a very nice harness for testing +operator behaviour without full deployment. Just `run_tests`: + + ./run_tests diff --git a/charms/nova-k8s/LICENSE b/charms/nova-k8s/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/charms/nova-k8s/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/charms/nova-k8s/README.md b/charms/nova-k8s/README.md new file mode 100644 index 00000000..b6ea66b2 --- /dev/null +++ b/charms/nova-k8s/README.md @@ -0,0 +1,24 @@ +# sunbeam-nova-operator + +## Description + +TODO: Describe your charm in a few paragraphs of Markdown + +## Usage + +TODO: Provide high-level usage, such as required config or relations + + +## Relations + +TODO: Provide any relations which are provided or required by your charm + +## OCI Images + +TODO: Include a link to the default image your charm uses + +## Contributing + +Please see the [Juju SDK docs](https://juju.is/docs/sdk) for guidelines +on enhancements to this charm following best practice guidelines, and +`CONTRIBUTING.md` for developer guidance. diff --git a/charms/nova-k8s/actions.yaml b/charms/nova-k8s/actions.yaml new file mode 100644 index 00000000..88e6195d --- /dev/null +++ b/charms/nova-k8s/actions.yaml @@ -0,0 +1,2 @@ +# NOTE: no actions yet! +{ } diff --git a/charms/nova-k8s/charmcraft.yaml b/charms/nova-k8s/charmcraft.yaml new file mode 100644 index 00000000..be706d54 --- /dev/null +++ b/charms/nova-k8s/charmcraft.yaml @@ -0,0 +1,14 @@ +# Learn more about charmcraft.yaml configuration at: +# https://juju.is/docs/sdk/charmcraft-config +type: "charm" +bases: + - build-on: + - name: "ubuntu" + channel: "20.04" + run-on: + - name: "ubuntu" + channel: "20.04" +parts: + charm: + build-packages: + - git diff --git a/charms/nova-k8s/config.yaml b/charms/nova-k8s/config.yaml new file mode 100644 index 00000000..830481f0 --- /dev/null +++ b/charms/nova-k8s/config.yaml @@ -0,0 +1,27 @@ +options: + debug: + default: False + description: Enable debug logging. + type: boolean + os-admin-hostname: + default: nova.juju + description: | + The hostname or address of the admin endpoints that should be advertised + in the nova image provider. + type: string + os-internal-hostname: + default: nova.juju + description: | + The hostname or address of the internal endpoints that should be advertised + in the nova image provider. + type: string + os-public-hostname: + default: nova.juju + description: | + The hostname or address of the internal endpoints that should be advertised + in the nova image provider. + type: string + region: + default: RegionOne + description: Space delimited list of OpenStack regions + type: string diff --git a/charms/nova-k8s/lib/charms/nginx_ingress_integrator/v0/ingress.py b/charms/nova-k8s/lib/charms/nginx_ingress_integrator/v0/ingress.py new file mode 100644 index 00000000..c8d2e0b1 --- /dev/null +++ b/charms/nova-k8s/lib/charms/nginx_ingress_integrator/v0/ingress.py @@ -0,0 +1,211 @@ +"""Library for the ingress relation. + +This library contains the Requires and Provides classes for handling +the ingress interface. + +Import `IngressRequires` in your charm, with two required options: + - "self" (the charm itself) + - config_dict + +`config_dict` accepts the following keys: + - service-hostname (required) + - service-name (required) + - service-port (required) + - additional-hostnames + - limit-rps + - limit-whitelist + - max-body-size + - path-routes + - retry-errors + - rewrite-enabled + - rewrite-target + - service-namespace + - session-cookie-max-age + - tls-secret-name + +See [the config section](https://charmhub.io/nginx-ingress-integrator/configure) for descriptions +of each, along with the required type. + +As an example, add the following to `src/charm.py`: +``` +from charms.nginx_ingress_integrator.v0.ingress import IngressRequires + +# In your charm's `__init__` method. 
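+# (Note: "external_hostname" is the example config option used by these library
+# docs; this charm's config.yaml defines os-public-hostname and related options
+# instead.)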
+self.ingress = IngressRequires(self, {"service-hostname": self.config["external_hostname"], + "service-name": self.app.name, + "service-port": 80}) + +# In your charm's `config-changed` handler. +self.ingress.update_config({"service-hostname": self.config["external_hostname"]}) +``` +And then add the following to `metadata.yaml`: +``` +requires: + ingress: + interface: ingress +``` +You _must_ register the IngressRequires class as part of the `__init__` method +rather than, for instance, a config-changed event handler. This is because +doing so won't get the current relation changed event, because it wasn't +registered to handle the event (because it wasn't created in `__init__` when +the event was fired). +""" + +import logging + +from ops.charm import CharmEvents +from ops.framework import EventBase, EventSource, Object +from ops.model import BlockedStatus + +# The unique Charmhub library identifier, never change it +LIBID = "db0af4367506491c91663468fb5caa4c" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 9 + +logger = logging.getLogger(__name__) + +REQUIRED_INGRESS_RELATION_FIELDS = { + "service-hostname", + "service-name", + "service-port", +} + +OPTIONAL_INGRESS_RELATION_FIELDS = { + "additional-hostnames", + "limit-rps", + "limit-whitelist", + "max-body-size", + "retry-errors", + "rewrite-target", + "rewrite-enabled", + "service-namespace", + "session-cookie-max-age", + "tls-secret-name", + "path-routes", +} + + +class IngressAvailableEvent(EventBase): + pass + + +class IngressCharmEvents(CharmEvents): + """Custom charm events.""" + + ingress_available = EventSource(IngressAvailableEvent) + + +class IngressRequires(Object): + """This class defines the functionality for the 'requires' side of the 'ingress' relation. + + Hook events observed: + - relation-changed + """ + + def __init__(self, charm, config_dict): + super().__init__(charm, "ingress") + + self.framework.observe(charm.on["ingress"].relation_changed, self._on_relation_changed) + + self.config_dict = config_dict + + def _config_dict_errors(self, update_only=False): + """Check our config dict for errors.""" + blocked_message = "Error in ingress relation, check `juju debug-log`" + unknown = [ + x + for x in self.config_dict + if x not in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS + ] + if unknown: + logger.error( + "Ingress relation error, unknown key(s) in config dictionary found: %s", + ", ".join(unknown), + ) + self.model.unit.status = BlockedStatus(blocked_message) + return True + if not update_only: + missing = [x for x in REQUIRED_INGRESS_RELATION_FIELDS if x not in self.config_dict] + if missing: + logger.error( + "Ingress relation error, missing required key(s) in config dictionary: %s", + ", ".join(missing), + ) + self.model.unit.status = BlockedStatus(blocked_message) + return True + return False + + def _on_relation_changed(self, event): + """Handle the relation-changed event.""" + # `self.unit` isn't available here, so use `self.model.unit`. 
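+        # Only the leader unit is allowed to write to the application relation
+        # databag, so non-leader units skip publishing the ingress config here.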
+ if self.model.unit.is_leader(): + if self._config_dict_errors(): + return + for key in self.config_dict: + event.relation.data[self.model.app][key] = str(self.config_dict[key]) + + def update_config(self, config_dict): + """Allow for updates to relation.""" + if self.model.unit.is_leader(): + self.config_dict = config_dict + if self._config_dict_errors(update_only=True): + return + relation = self.model.get_relation("ingress") + if relation: + for key in self.config_dict: + relation.data[self.model.app][key] = str(self.config_dict[key]) + + +class IngressProvides(Object): + """This class defines the functionality for the 'provides' side of the 'ingress' relation. + + Hook events observed: + - relation-changed + """ + + def __init__(self, charm): + super().__init__(charm, "ingress") + # Observe the relation-changed hook event and bind + # self.on_relation_changed() to handle the event. + self.framework.observe(charm.on["ingress"].relation_changed, self._on_relation_changed) + self.charm = charm + + def _on_relation_changed(self, event): + """Handle a change to the ingress relation. + + Confirm we have the fields we expect to receive.""" + # `self.unit` isn't available here, so use `self.model.unit`. + if not self.model.unit.is_leader(): + return + + ingress_data = { + field: event.relation.data[event.app].get(field) + for field in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS + } + + missing_fields = sorted( + [ + field + for field in REQUIRED_INGRESS_RELATION_FIELDS + if ingress_data.get(field) is None + ] + ) + + if missing_fields: + logger.error( + "Missing required data fields for ingress relation: {}".format( + ", ".join(missing_fields) + ) + ) + self.model.unit.status = BlockedStatus( + "Missing fields for ingress: {}".format(", ".join(missing_fields)) + ) + + # Create an event that our charm can use to decide it's okay to + # configure the ingress. + self.charm.on.ingress_available.emit() diff --git a/charms/nova-k8s/lib/charms/observability_libs/v0/kubernetes_service_patch.py b/charms/nova-k8s/lib/charms/observability_libs/v0/kubernetes_service_patch.py new file mode 100644 index 00000000..3f609912 --- /dev/null +++ b/charms/nova-k8s/lib/charms/observability_libs/v0/kubernetes_service_patch.py @@ -0,0 +1,241 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + +"""# KubernetesServicePatch Library. + +This library is designed to enable developers to more simply patch the Kubernetes Service created +by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a +service named after the application in the namespace (named after the Juju model). This service by +default contains a "placeholder" port, which is 65536/TCP. + +When modifying the default set of resources managed by Juju, one must consider the lifecycle of the +charm. In this case, any modifications to the default service (created during deployment), will +be overwritten during a charm upgrade. + +When intialised, this library binds a handler to the parent charm's `install` and `upgrade_charm` +events which applies the patch to the cluster. This should ensure that the service ports are +correct throughout the charm's life. + +The constructor simply takes a reference to the parent charm, and a list of tuples that each define +a port for the service, where each tuple contains: + +- a name for the port +- port for the service to listen on +- optionally: a targetPort for the service (the port in the container!) 
+- optionally: a nodePort for the service (for NodePort or LoadBalancer services only!) +- optionally: a name of the service (in case service name needs to be patched as well) + +## Getting Started + +To get started using the library, you just need to fetch the library using `charmcraft`. **Note +that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.** + +```shell +cd some-charm +charmcraft fetch-lib charms.observability_libs.v0.kubernetes_service_patch +echo <<-EOF >> requirements.txt +lightkube +lightkube-models +EOF +``` + +Then, to initialise the library: + +For ClusterIP services: +```python +# ... +from charms.observability_libs.v0.kubernetes_service_patch import KubernetesServicePatch + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + self.service_patcher = KubernetesServicePatch(self, [(f"{self.app.name}", 8080)]) + # ... +``` + +For LoadBalancer/NodePort services: +```python +# ... +from charms.observability_libs.v0.kubernetes_service_patch import KubernetesServicePatch + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + self.service_patcher = KubernetesServicePatch( + self, [(f"{self.app.name}", 443, 443, 30666)], "LoadBalancer" + ) + # ... +``` + +Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library +does not try to make any API calls, or open any files during testing that are unlikely to be +present, and could break your tests. The easiest way to do this is during your test `setUp`: + +```python +# ... + +@patch("charm.KubernetesServicePatch", lambda x, y: None) +def setUp(self, *unused): + self.harness = Harness(SomeCharm) + # ... +``` +""" + +import logging +from types import MethodType +from typing import Literal, Sequence, Tuple, Union + +from lightkube import ApiError, Client +from lightkube.models.core_v1 import ServicePort, ServiceSpec +from lightkube.models.meta_v1 import ObjectMeta +from lightkube.resources.core_v1 import Service +from lightkube.types import PatchType +from ops.charm import CharmBase +from ops.framework import Object + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "0042f86d0a874435adef581806cddbbb" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 5 + +PortDefinition = Union[Tuple[str, int], Tuple[str, int, int], Tuple[str, int, int, int]] +ServiceType = Literal["ClusterIP", "LoadBalancer"] + + +class KubernetesServicePatch(Object): + """A utility for patching the Kubernetes service set up by Juju.""" + + def __init__( + self, + charm: CharmBase, + ports: Sequence[PortDefinition], + service_name: str = None, + service_type: ServiceType = "ClusterIP", + ): + """Constructor for KubernetesServicePatch. + + Args: + charm: the charm that is instantiating the library. + ports: a list of tuples (name, port, targetPort, nodePort) for every service port. + service_name: allows setting custom name to the patched service. If none given, + application name will be used. + service_type: desired type of K8s service. Default value is in line with ServiceSpec's + default value. 
+ """ + super().__init__(charm, "kubernetes-service-patch") + self.charm = charm + self.service_name = service_name if service_name else self._app + self.service = self._service_object(ports, service_name, service_type) + + # Make mypy type checking happy that self._patch is a method + assert isinstance(self._patch, MethodType) + # Ensure this patch is applied during the 'install' and 'upgrade-charm' events + self.framework.observe(charm.on.install, self._patch) + self.framework.observe(charm.on.upgrade_charm, self._patch) + + def _service_object( + self, + ports: Sequence[PortDefinition], + service_name: str = None, + service_type: ServiceType = "ClusterIP", + ) -> Service: + """Creates a valid Service representation for Alertmanager. + + Args: + ports: a list of tuples of the form (name, port) or (name, port, targetPort) + or (name, port, targetPort, nodePort) for every service port. If the 'targetPort' + is omitted, it is assumed to be equal to 'port', with the exception of NodePort + and LoadBalancer services, where all port numbers have to be specified. + service_name: allows setting custom name to the patched service. If none given, + application name will be used. + service_type: desired type of K8s service. Default value is in line with ServiceSpec's + default value. + + Returns: + Service: A valid representation of a Kubernetes Service with the correct ports. + """ + if not service_name: + service_name = self._app + return Service( + apiVersion="v1", + kind="Service", + metadata=ObjectMeta( + namespace=self._namespace, + name=service_name, + labels={"app.kubernetes.io/name": service_name}, + ), + spec=ServiceSpec( + selector={"app.kubernetes.io/name": service_name}, + ports=[ + ServicePort( + name=p[0], + port=p[1], + targetPort=p[2] if len(p) > 2 else p[1], # type: ignore[misc] + nodePort=p[3] if len(p) > 3 else None, # type: ignore[arg-type, misc] + ) + for p in ports + ], + type=service_type, + ), + ) + + def _patch(self, _) -> None: + """Patch the Kubernetes service created by Juju to map the correct port. + + Raises: + PatchFailed: if patching fails due to lack of permissions, or otherwise. + """ + if not self.charm.unit.is_leader(): + return + + client = Client() + try: + client.patch(Service, self._app, self.service, patch_type=PatchType.MERGE) + except ApiError as e: + if e.status.code == 403: + logger.error("Kubernetes service patch failed: `juju trust` this application.") + else: + logger.error("Kubernetes service patch failed: %s", str(e)) + else: + logger.info("Kubernetes service '%s' patched successfully", self._app) + + def is_patched(self) -> bool: + """Reports if the service patch has been applied. + + Returns: + bool: A boolean indicating if the service patch has been applied. + """ + client = Client() + # Get the relevant service from the cluster + service = client.get(Service, name=self.service_name, namespace=self._namespace) + # Construct a list of expected ports, should the patch be applied + expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports] + # Construct a list in the same manner, using the fetched service + fetched_ports = [(p.port, p.targetPort) for p in service.spec.ports] # type: ignore[attr-defined] # noqa: E501 + return expected_ports == fetched_ports + + @property + def _app(self) -> str: + """Name of the current Juju application. + + Returns: + str: A string containing the name of the current Juju application. 
+ """ + return self.charm.app.name + + @property + def _namespace(self) -> str: + """The Kubernetes namespace we're running in. + + Returns: + str: A string containing the name of the current Kubernetes namespace. + """ + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: + return f.read().strip() diff --git a/charms/nova-k8s/lib/charms/sunbeam_keystone_operator/v0/identity_service.py b/charms/nova-k8s/lib/charms/sunbeam_keystone_operator/v0/identity_service.py new file mode 100644 index 00000000..7a7f4e43 --- /dev/null +++ b/charms/nova-k8s/lib/charms/sunbeam_keystone_operator/v0/identity_service.py @@ -0,0 +1,470 @@ +"""IdentityServiceProvides and Requires module. + + +This library contains the Requires and Provides classes for handling +the identity_service interface. + +Import `IdentityServiceRequires` in your charm, with the charm object and the +relation name: + - self + - "identity_service" + +Also provide additional parameters to the charm object: + - service + - internal_url + - public_url + - admin_url + - region + - username + - vhost + +Two events are also available to respond to: + - connected + - ready + - goneaway + +A basic example showing the usage of this relation follows: + +``` +from charms.sunbeam_sunbeam_identity_service_operator.v0.identity_service import IdentityServiceRequires + +class IdentityServiceClientCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + # IdentityService Requires + self.identity_service = IdentityServiceRequires( + self, "identity_service", + service = "my-service" + internal_url = "http://internal-url" + public_url = "http://public-url" + admin_url = "http://admin-url" + region = "region" + ) + self.framework.observe( + self.identity_service.on.connected, self._on_identity_service_connected) + self.framework.observe( + self.identity_service.on.ready, self._on_identity_service_ready) + self.framework.observe( + self.identity_service.on.goneaway, self._on_identity_service_goneaway) + + def _on_identity_service_connected(self, event): + '''React to the IdentityService connected event. + + This event happens when n IdentityService relation is added to the + model before credentials etc have been provided. + ''' + # Do something before the relation is complete + pass + + def _on_identity_service_ready(self, event): + '''React to the IdentityService ready event. + + The IdentityService interface will use the provided config for the + request to the identity server. + ''' + # IdentityService Relation is ready. Do something with the completed relation. + pass + + def _on_identity_service_goneaway(self, event): + '''React to the IdentityService goneaway event. + + This event happens when an IdentityService relation is removed. + ''' + # IdentityService Relation has goneaway. 
shutdown services or suchlike + pass +``` +""" + +# The unique Charmhub library identifier, never change it +LIBID = "6a7cb19b98314ecf916e3fcb02708608" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 1 + +import json +import logging +import requests + +from ops.framework import ( + StoredState, + EventBase, + ObjectEvents, + EventSource, + Object, +) + +from ops.model import Relation + +from typing import List + +logger = logging.getLogger(__name__) + + +class IdentityServiceConnectedEvent(EventBase): + """IdentityService connected Event.""" + + pass + + +class IdentityServiceReadyEvent(EventBase): + """IdentityService ready for use Event.""" + + pass + + +class IdentityServiceGoneAwayEvent(EventBase): + """IdentityService relation has gone-away Event""" + + pass + + +class IdentityServiceServerEvents(ObjectEvents): + """Events class for `on`""" + + connected = EventSource(IdentityServiceConnectedEvent) + ready = EventSource(IdentityServiceReadyEvent) + goneaway = EventSource(IdentityServiceGoneAwayEvent) + + +class IdentityServiceRequires(Object): + """ + IdentityServiceRequires class + """ + + on = IdentityServiceServerEvents() + _stored = StoredState() + + def __init__(self, charm, relation_name: str, service_endpoints: dict, + region: str): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.service_endpoints = service_endpoints + self.region = region + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_identity_service_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_identity_service_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_departed, + self._on_identity_service_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_identity_service_relation_broken, + ) + + def _on_identity_service_relation_joined(self, event): + """IdentityService relation joined.""" + logging.debug("IdentityService on_joined") + self.on.connected.emit() + self.register_services( + self.service_endpoints, + self.region) + + def _on_identity_service_relation_changed(self, event): + """IdentityService relation changed.""" + logging.debug("IdentityService on_changed") + try: + self.service_password + self.on.ready.emit() + except AttributeError: + pass + + def _on_identity_service_relation_broken(self, event): + """IdentityService relation broken.""" + logging.debug("IdentityService on_broken") + self.on.goneaway.emit() + + @property + def _identity_service_rel(self) -> Relation: + """The IdentityService relation.""" + return self.framework.model.get_relation(self.relation_name) + + def get_remote_app_data(self, key: str) -> str: + """Return the value for the given key from remote app data.""" + data = self._identity_service_rel.data[self._identity_service_rel.app] + return data.get(key) + + @property + def api_version(self) -> str: + """Return the api_version.""" + return self.get_remote_app_data('api-version') + + @property + def auth_host(self) -> str: + """Return the auth_host.""" + return self.get_remote_app_data('auth-host') + + @property + def auth_port(self) -> str: + """Return the auth_port.""" + return self.get_remote_app_data('auth-port') + + @property + def auth_protocol(self) -> str: + """Return 
the auth_protocol.""" + return self.get_remote_app_data('auth-protocol') + + @property + def internal_host(self) -> str: + """Return the internal_host.""" + return self.get_remote_app_data('internal-host') + + @property + def internal_port(self) -> str: + """Return the internal_port.""" + return self.get_remote_app_data('internal-port') + + @property + def internal_protocol(self) -> str: + """Return the internal_protocol.""" + return self.get_remote_app_data('internal-protocol') + + @property + def admin_domain_name(self) -> str: + """Return the admin_domain_name.""" + return self.get_remote_app_data('admin-domain-name') + + @property + def admin_domain_id(self) -> str: + """Return the admin_domain_id.""" + return self.get_remote_app_data('admin-domain-id') + + @property + def admin_project_name(self) -> str: + """Return the admin_project_name.""" + return self.get_remote_app_data('admin-project-name') + + @property + def admin_project_id(self) -> str: + """Return the admin_project_id.""" + return self.get_remote_app_data('admin-project-id') + + @property + def admin_user_name(self) -> str: + """Return the admin_user_name.""" + return self.get_remote_app_data('admin-user-name') + + @property + def admin_user_id(self) -> str: + """Return the admin_user_id.""" + return self.get_remote_app_data('admin-user-id') + + @property + def service_domain_name(self) -> str: + """Return the service_domain_name.""" + return self.get_remote_app_data('service-domain-name') + + @property + def service_domain_id(self) -> str: + """Return the service_domain_id.""" + return self.get_remote_app_data('service-domain-id') + + @property + def service_host(self) -> str: + """Return the service_host.""" + return self.get_remote_app_data('service-host') + + @property + def service_password(self) -> str: + """Return the service_password.""" + return self.get_remote_app_data('service-password') + + @property + def service_port(self) -> str: + """Return the service_port.""" + return self.get_remote_app_data('service-port') + + @property + def service_protocol(self) -> str: + """Return the service_protocol.""" + return self.get_remote_app_data('service-protocol') + + @property + def service_project_name(self) -> str: + """Return the service_project_name.""" + return self.get_remote_app_data('service-project-name') + + @property + def service_project_id(self) -> str: + """Return the service_project_id.""" + return self.get_remote_app_data('service-project-id') + + @property + def service_user_name(self) -> str: + """Return the service_user_name.""" + return self.get_remote_app_data('service-user-name') + + @property + def service_user_id(self) -> str: + """Return the service_user_id.""" + return self.get_remote_app_data('service-user-id') + + + def register_services(self, service_endpoints: dict, + region: str) -> None: + """Request access to the IdentityService server.""" + if self.model.unit.is_leader(): + logging.debug("Requesting service registration") + app_data = self._identity_service_rel.data[self.charm.app] + app_data["service-endpoints"] = json.dumps(service_endpoints) + app_data["region"] = region + + +class HasIdentityServiceClientsEvent(EventBase): + """Has IdentityServiceClients Event.""" + + pass + + +class ReadyIdentityServiceClientsEvent(EventBase): + """IdentityServiceClients Ready Event.""" + + def __init__(self, handle, relation_id, relation_name, service_endpoints, + region, client_app_name): + super().__init__(handle) + self.relation_id = relation_id + self.relation_name = relation_name + 
self.service_endpoints = service_endpoints + self.region = region + self.client_app_name = client_app_name + + + def snapshot(self): + return { + "relation_id": self.relation_id, + "relation_name": self.relation_name, + "service_endpoints": self.service_endpoints, + "client_app_name": self.client_app_name, + "region": self.region} + + def restore(self, snapshot): + super().restore(snapshot) + self.relation_id = snapshot["relation_id"] + self.relation_name = snapshot["relation_name"] + self.service_endpoints = snapshot["service_endpoints"] + self.region = snapshot["region"] + self.client_app_name = snapshot["client_app_name"] + + +class IdentityServiceClientEvents(ObjectEvents): + """Events class for `on`""" + + has_identity_service_clients = EventSource(HasIdentityServiceClientsEvent) + ready_identity_service_clients = EventSource(ReadyIdentityServiceClientsEvent) + + +class IdentityServiceProvides(Object): + """ + IdentityServiceProvides class + """ + + on = IdentityServiceClientEvents() + _stored = StoredState() + + def __init__(self, charm, relation_name): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_identity_service_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_identity_service_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_identity_service_relation_broken, + ) + + def _on_identity_service_relation_joined(self, event): + """Handle IdentityService joined.""" + logging.debug("IdentityService on_joined") + self.on.has_identity_service_clients.emit() + + def _on_identity_service_relation_changed(self, event): + """Handle IdentityService changed.""" + logging.debug("IdentityService on_changed") + REQUIRED_KEYS = [ + 'service-endpoints', + 'region'] + + values = [ + event.relation.data[event.relation.app].get(k) + for k in REQUIRED_KEYS ] + # Validate data on the relation + if all(values): + print(event.relation.id) + print(event.relation.name) + service_eps = json.loads( + event.relation.data[event.relation.app]['service-endpoints']) + self.on.ready_identity_service_clients.emit( + event.relation.id, + event.relation.name, + service_eps, + event.relation.data[event.relation.app]['region'], + event.relation.app.name) + + def _on_identity_service_relation_broken(self, event): + """Handle IdentityService broken.""" + logging.debug("IdentityServiceProvides on_departed") + # TODO clear data on the relation + + def set_identity_service_credentials(self, relation_name: int, + relation_id: str, + api_version: str, + auth_host: str, + auth_port: str, + auth_protocol: str, + internal_host: str, + internal_port: str, + internal_protocol: str, + service_host: str, + service_port: str, + service_protocol: str, + admin_domain: str, + admin_project: str, + admin_user: str, + service_domain: str, + service_password: str, + service_project: str, + service_user: str): + logging.debug("Setting identity_service connection information.") + for relation in self.framework.model.relations[relation_name]: + if relation.id == relation_id: + _identity_service_rel = relation + app_data = _identity_service_rel.data[self.charm.app] + app_data["api-version"] = api_version + app_data["auth-host"] = auth_host + app_data["auth-port"] = str(auth_port) + app_data["auth-protocol"] = auth_protocol + app_data["internal-host"] = internal_host + app_data["internal-port"] = 
str(internal_port) + app_data["internal-protocol"] = internal_protocol + app_data["service-host"] = service_host + app_data["service-port"] = str(service_port) + app_data["service-protocol"] = service_protocol + app_data["admin-domain-name"] = admin_domain.name + app_data["admin-domain-id"] = admin_domain.id + app_data["admin-project-name"] = admin_project.name + app_data["admin-project-id"] = admin_project.id + app_data["admin-user-name"] = admin_user.name + app_data["admin-user-id"] = admin_user.id + app_data["service-domain-name"] = service_domain.name + app_data["service-domain-id"] = service_domain.id + app_data["service-project-name"] = service_project.name + app_data["service-project-id"] = service_project.id + app_data["service-user-name"] = service_user.name + app_data["service-user-id"] = service_user.id + app_data["service-password"] = service_password diff --git a/charms/nova-k8s/lib/charms/sunbeam_mysql_k8s/v0/mysql.py b/charms/nova-k8s/lib/charms/sunbeam_mysql_k8s/v0/mysql.py new file mode 100644 index 00000000..69d4833f --- /dev/null +++ b/charms/nova-k8s/lib/charms/sunbeam_mysql_k8s/v0/mysql.py @@ -0,0 +1,165 @@ +""" +## Overview + +This document explains how to integrate with the MySQL charm for the purposes of consuming a mysql database. It also explains how alternative implementations of the MySQL charm may maintain the same interface and be backward compatible with all currently integrated charms. Finally this document is the authoritative reference on the structure of relation data that is shared between MySQL charms and any other charm that intends to use the database. + + +## Consumer Library Usage + +The MySQL charm library uses the [Provider and Consumer](https://ops.readthedocs.io/en/latest/#module-ops.relation) objects from the Operator Framework. Charms that would like to use a MySQL database must use the `MySQLConsumer` object from the charm library. Using the `MySQLConsumer` object requires instantiating it, typically in the constructor of your charm. The `MySQLConsumer` constructor requires the name of the relation over which a database will be used. This relation must use the `mysql_datastore` interface. In addition the constructor also requires a `consumes` specification, which is a dictionary with key `mysql` (also see Provider Library Usage below) and a value that represents the minimum acceptable version of MySQL. This version string can be in any format that is compatible with the Python [Semantic Version module](https://pypi.org/project/semantic-version/). For example, assuming your charm consumes a database over a rlation named "monitoring", you may instantiate `MySQLConsumer` as follows: + + from charms.mysql_k8s.v0.mysql import MySQLConsumer + def __init__(self, *args): + super().__init__(*args) + ... + self.mysql_consumer = MySQLConsumer( + self, "monitoring", {"mysql": ">=8"} + ) + ... + +This example hard codes the consumes dictionary argument containing the minimal MySQL version required, however you may want to consider generating this dictionary by some other means, such as a `self.consumes` property in your charm. This is because the minimum required MySQL version may change when you upgrade your charm. Of course it is expected that you will keep this version string updated as you develop newer releases of your charm. If the version string can be determined at run time by inspecting the actual deployed version of your charmed application, this would be ideal. 
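+
+For example, a charm might expose this as a property that computes the version
+string at run time (an illustrative sketch only; `installed_mysql_version()` is a
+hypothetical helper the charm would need to provide):
+
+    @property
+    def consumes(self):
+        # Derive the minimum acceptable MySQL version from the deployed app.
+        return {"mysql": ">={}".format(installed_mysql_version())}
+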
+An instantiated `MySQLConsumer` object may be used to request new databases using the `new_database()` method. This method requires no arguments unless you require multiple databases. If multiple databases are requested, you must provide a unique `name_suffix` argument. For example: + + def _on_database_relation_joined(self, event): + self.mysql_consumer.new_database(name_suffix="db1") + self.mysql_consumer.new_database(name_suffix="db2") + +The `address`, `port`, `databases`, and `credentials` methods can all be called +to get the relevant information from the relation data. +""" + +# !/usr/bin/env python3 +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + +import json +import uuid +import logging +from ops.relation import ConsumerBase + +from ops.framework import ( + StoredState, + EventBase, + ObjectEvents, + EventSource, + Object, +) + +# The unique Charmhub library identifier, never change it +LIBID = "1fdc567d7095465990dc1f9be80461fd" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 1 + +logger = logging.getLogger(__name__) + +class DatabaseConnectedEvent(EventBase): + """Database connected Event.""" + + pass + + +class DatabaseReadyEvent(EventBase): + """Database ready for use Event.""" + + pass + + +class DatabaseGoneAwayEvent(EventBase): + """Database relation has gone-away Event""" + + pass + + +class DatabaseServerEvents(ObjectEvents): + """Events class for `on`""" + + connected = EventSource(DatabaseConnectedEvent) + ready = EventSource(DatabaseReadyEvent) + goneaway = EventSource(DatabaseGoneAwayEvent) + + +class MySQLConsumer(Object): + """ + MySQLConsumer lib class + """ + + on = DatabaseServerEvents() + + def __init__(self, charm, relation_name: str, databases: list): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.request_databases = databases + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_database_relation_joined, + ) + + def _on_database_relation_joined(self, event): + """AMQP relation joined.""" + logging.debug("DatabaseRequires on_joined") + self.on.connected.emit() + self.request_access(self.request_databases) + + def databases(self, rel_id=None) -> list: + """ + List of currently available databases + Returns: + list: list of database names + """ + + rel = self.framework.model.get_relation(self.relation_name, rel_id) + relation_data = rel.data[rel.app] + dbs = relation_data.get("databases") + databases = json.loads(dbs) if dbs else [] + + return databases + + def credentials(self, rel_id=None) -> dict: + """ + Dictionary of credential information to access databases + Returns: + dict: dictionary of credential information including username, + password and address + """ + rel = self.framework.model.get_relation(self.relation_name, rel_id) + relation_data = rel.data[rel.app] + data = relation_data.get("data") + data = json.loads(data) if data else {} + credentials = data.get("credentials") + + return credentials + + def new_database(self, rel_id=None, name_suffix=""): + """ + Request creation of an additional database + """ + if not self.charm.unit.is_leader(): + return + + rel = self.framework.model.get_relation(self.relation_name, rel_id) + + if name_suffix: + name_suffix = "_{}".format(name_suffix) + + rid = str(uuid.uuid4()).split("-")[-1] + db_name = 
"db_{}_{}_{}".format(rel.id, rid, name_suffix) + logger.debug("CLIENT REQUEST %s", db_name) + rel_data = rel.data[self.charm.app] + dbs = rel_data.get("databases") + dbs = json.loads(dbs) if dbs else [] + dbs.append(db_name) + rel.data[self.charm.app]["databases"] = json.dumps(dbs) + + def request_access(self, databases: list) -> None: + """Request access to the AMQP server.""" + if self.model.unit.is_leader(): + logging.debug("Requesting AMQP user and vhost") + if databases: + rel = self.framework.model.get_relation(self.relation_name) + rel.data[self.charm.app]["databases"] = json.dumps(databases) diff --git a/charms/nova-k8s/lib/charms/sunbeam_rabbitmq_operator/v0/amqp.py b/charms/nova-k8s/lib/charms/sunbeam_rabbitmq_operator/v0/amqp.py new file mode 100644 index 00000000..4c1540de --- /dev/null +++ b/charms/nova-k8s/lib/charms/sunbeam_rabbitmq_operator/v0/amqp.py @@ -0,0 +1,314 @@ +"""AMQPProvides and Requires module. + + +This library contains the Requires and Provides classes for handling +the amqp interface. + +Import `AMQPRequires` in your charm, with the charm object and the +relation name: + - self + - "amqp" + +Also provide two additional parameters to the charm object: + - username + - vhost + +Two events are also available to respond to: + - connected + - ready + - goneaway + +A basic example showing the usage of this relation follows: + +``` +from charms.sunbeam_rabbitmq_operator.v0.amqp import AMQPRequires + +class AMQPClientCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + # AMQP Requires + self.amqp = AMQPRequires( + self, "amqp", + username="myusername", + vhost="vhostname" + ) + self.framework.observe( + self.amqp.on.connected, self._on_amqp_connected) + self.framework.observe( + self.amqp.on.ready, self._on_amqp_ready) + self.framework.observe( + self.amqp.on.goneaway, self._on_amqp_goneaway) + + def _on_amqp_connected(self, event): + '''React to the AMQP connected event. + + This event happens when n AMQP relation is added to the + model before credentials etc have been provided. + ''' + # Do something before the relation is complete + pass + + def _on_amqp_ready(self, event): + '''React to the AMQP ready event. + + The AMQP interface will use the provided username and vhost for the + request to the rabbitmq server. + ''' + # AMQP Relation is ready. Do something with the completed relation. + pass + + def _on_amqp_goneaway(self, event): + '''React to the AMQP goneaway event. + + This event happens when an AMQP relation is removed. + ''' + # AMQP Relation has goneaway. 
shutdown services or suchlike + pass +``` +""" + +# The unique Charmhub library identifier, never change it +LIBID = "ab1414b6baf044f099caf9c117f1a101" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 3 + +import logging +import requests + +from ops.framework import ( + StoredState, + EventBase, + ObjectEvents, + EventSource, + Object, +) + +from ops.model import Relation + +from typing import List + +logger = logging.getLogger(__name__) + + +class AMQPConnectedEvent(EventBase): + """AMQP connected Event.""" + + pass + + +class AMQPReadyEvent(EventBase): + """AMQP ready for use Event.""" + + pass + + +class AMQPGoneAwayEvent(EventBase): + """AMQP relation has gone-away Event""" + + pass + + +class AMQPServerEvents(ObjectEvents): + """Events class for `on`""" + + connected = EventSource(AMQPConnectedEvent) + ready = EventSource(AMQPReadyEvent) + goneaway = EventSource(AMQPGoneAwayEvent) + + +class AMQPRequires(Object): + """ + AMQPRequires class + """ + + on = AMQPServerEvents() + _stored = StoredState() + + def __init__(self, charm, relation_name: str, username: str, vhost: str): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.username = username + self.vhost = vhost + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_amqp_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_amqp_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_departed, + self._on_amqp_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_amqp_relation_broken, + ) + + def _on_amqp_relation_joined(self, event): + """AMQP relation joined.""" + logging.debug("RabbitMQAMQPRequires on_joined") + self.on.connected.emit() + self.request_access(self.username, self.vhost) + + def _on_amqp_relation_changed(self, event): + """AMQP relation changed.""" + logging.debug("RabbitMQAMQPRequires on_changed") + if self.password: + self.on.ready.emit() + + def _on_amqp_relation_broken(self, event): + """AMQP relation broken.""" + logging.debug("RabbitMQAMQPRequires on_broken") + self.on.goneaway.emit() + + @property + def _amqp_rel(self) -> Relation: + """The AMQP relation.""" + return self.framework.model.get_relation(self.relation_name) + + @property + def password(self) -> str: + """Return the AMQP password from the server side of the relation.""" + return self._amqp_rel.data[self._amqp_rel.app].get("password") + + @property + def hostname(self) -> str: + """Return the hostname from the AMQP relation""" + return self._amqp_rel.data[self._amqp_rel.app].get("hostname") + + @property + def ssl_port(self) -> str: + """Return the SSL port from the AMQP relation""" + return self._amqp_rel.data[self._amqp_rel.app].get("ssl_port") + + @property + def ssl_ca(self) -> str: + """Return the SSL port from the AMQP relation""" + return self._amqp_rel.data[self._amqp_rel.app].get("ssl_ca") + + @property + def hostnames(self) -> List[str]: + """Return a list of remote RMQ hosts from the AMQP relation""" + _hosts = [] + for unit in self._amqp_rel.units: + _hosts.append(self._amqp_rel.data[unit].get("ingress-address")) + return _hosts + + def request_access(self, username: str, vhost: str) -> None: + """Request access to the AMQP server.""" + if 
self.model.unit.is_leader(): + logging.debug("Requesting AMQP user and vhost") + self._amqp_rel.data[self.charm.app]["username"] = username + self._amqp_rel.data[self.charm.app]["vhost"] = vhost + + +class HasAMQPClientsEvent(EventBase): + """Has AMQPClients Event.""" + + pass + + +class ReadyAMQPClientsEvent(EventBase): + """AMQPClients Ready Event.""" + + pass + + +class AMQPClientEvents(ObjectEvents): + """Events class for `on`""" + + has_amqp_clients = EventSource(HasAMQPClientsEvent) + ready_amqp_clients = EventSource(ReadyAMQPClientsEvent) + + +class AMQPProvides(Object): + """ + AMQPProvides class + """ + + on = AMQPClientEvents() + _stored = StoredState() + + def __init__(self, charm, relation_name): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_amqp_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_amqp_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_amqp_relation_broken, + ) + + def _on_amqp_relation_joined(self, event): + """Handle AMQP joined.""" + logging.debug("RabbitMQAMQPProvides on_joined") + self.on.has_amqp_clients.emit() + + def _on_amqp_relation_changed(self, event): + """Handle AMQP changed.""" + logging.debug("RabbitMQAMQPProvides on_changed") + # Validate data on the relation + if self.username(event) and self.vhost(event): + self.on.ready_amqp_clients.emit() + if self.charm.unit.is_leader(): + self.set_amqp_credentials( + event, self.username(event), self.vhost(event) + ) + + def _on_amqp_relation_broken(self, event): + """Handle AMQP broken.""" + logging.debug("RabbitMQAMQPProvides on_departed") + # TODO clear data on the relation + + def username(self, event): + """Return the AMQP username from the client side of the relation.""" + return event.relation.data[event.relation.app].get("username") + + def vhost(self, event): + """Return the AMQP vhost from the client side of the relation.""" + return event.relation.data[event.relation.app].get("vhost") + + def set_amqp_credentials(self, event, username, vhost): + """Set AMQP Credentials. + + :param event: The current event + :type EventsBase + :param username: The requested username + :type username: str + :param vhost: The requested vhost + :type vhost: str + :returns: None + :rtype: None + """ + # TODO: Can we move this into the charm code? + # TODO TLS Support. Existing interfaces set ssl_port and ssl_ca + logging.debug("Setting amqp connection information.") + try: + if not self.charm.does_vhost_exist(vhost): + self.charm.create_vhost(vhost) + password = self.charm.create_user(username) + self.charm.set_user_permissions(username, vhost) + event.relation.data[self.charm.app]["password"] = password + event.relation.data[self.charm.app][ + "hostname" + ] = self.charm.hostname + except requests.exceptions.ConnectionError as e: + logging.warning( + "Rabbitmq is not ready. Defering. Errno: {}".format(e.errno) + ) + event.defer() diff --git a/charms/nova-k8s/metadata.yaml b/charms/nova-k8s/metadata.yaml new file mode 100644 index 00000000..71509478 --- /dev/null +++ b/charms/nova-k8s/metadata.yaml @@ -0,0 +1,76 @@ +name: sunbeam-nova-operator +summary: OpenStack Compute - Nova cloud controller service +maintainer: OpenStack Charmers +description: | + OpenStack is a reliable cloud infrastructure. 
Its mission is to produce + the ubiquitous cloud computing platform that will meet the needs of public + and private cloud providers regardless of size, by being simple to implement + and massively scalable. + . + OpenStack Compute, codenamed Nova, is a cloud computing fabric controller. In + addition to its "native" API (the OpenStack API), it also supports the Amazon + EC2 API. + . + This charm provides the cloud controller service for OpenStack Nova and includes + nova-scheduler, nova-api and nova-conductor services. +version: 3 +bases: + - name: ubuntu + channel: 20.04/stable +tags: + - openstack + +containers: + nova-api: + resource: nova-api-image + nova-scheduler: + resource: nova-scheduler-image + nova-conductor: + resource: nova-conductor-image + nova-metadata: + resource: nova-metadata-image + +resources: + nova-api-image: + type: oci-image + description: OCI image for OpenStack Nova API (kolla/nova-api-image) + nova-scheduler-image: + type: oci-image + description: OCI image for OpenStack Nova API (kolla/nova-scheduler-image) + nova-conductor-image: + type: oci-image + description: OCI image for OpenStack Nova API (kolla/nova-conductor-image) + nova-metadata-image: + type: oci-image + description: OCI image for OpenStack Nova API (kolla/nova-metadata-image) + +requires: + ingress: + interface: ingress + shared-db: + interface: mysql_datastore + limit: 1 + amqp: + interface: rabbitmq + image-service: + interface: glance + identity-service: + interface: keystone + cloud-compute: + interface: nova-compute + cinder-volume-service: + interface: cinder + neutron-network-service: + interface: neutron + neutron-api: + interface: neutron-api + placement: + interface: placement + +provides: + cloud-controller: + interface: nova + +peers: + peers: + interface: nova-peer diff --git a/charms/nova-k8s/requirements-dev.txt b/charms/nova-k8s/requirements-dev.txt new file mode 100644 index 00000000..4f2a3f5b --- /dev/null +++ b/charms/nova-k8s/requirements-dev.txt @@ -0,0 +1,3 @@ +-r requirements.txt +coverage +flake8 diff --git a/charms/nova-k8s/requirements.txt b/charms/nova-k8s/requirements.txt new file mode 100644 index 00000000..32a3d3b9 --- /dev/null +++ b/charms/nova-k8s/requirements.txt @@ -0,0 +1,10 @@ +# ops >= 1.2.0 +jinja2 +git+https://github.com/canonical/operator@2875e73e#egg=ops +git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack +git+https://github.com/openstack-charmers/advanced-sunbeam-openstack#egg=advanced_sunbeam_openstack +lightkube +# These are only needeed if the charm relates to ceph +git+https://github.com/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client +# Charmhelpers is only present as interface_ceph_client uses it. +git+https://github.com/juju/charm-helpers.git#egg=charmhelpers diff --git a/charms/nova-k8s/run_tests b/charms/nova-k8s/run_tests new file mode 100755 index 00000000..90db638b --- /dev/null +++ b/charms/nova-k8s/run_tests @@ -0,0 +1,17 @@ +#!/bin/sh -e +# Copyright 2022 liam +# See LICENSE file for licensing details. + +if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then + . venv/bin/activate +fi + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH="lib:src" +else + export PYTHONPATH="lib:src:$PYTHONPATH" +fi + +flake8 +coverage run --branch --source=src -m unittest -v "$@" +coverage report -m diff --git a/charms/nova-k8s/src/charm.py b/charms/nova-k8s/src/charm.py new file mode 100644 index 00000000..18d7d356 --- /dev/null +++ b/charms/nova-k8s/src/charm.py @@ -0,0 +1,208 @@ +#!/usr/bin/env python3 +"""Nova Operator Charm. 
+
+This charm provides Nova services as part of an OpenStack deployment.
+"""
+
+import logging
+from typing import List
+
+from ops.framework import StoredState
+from ops.main import main
+
+import advanced_sunbeam_openstack.cprocess as sunbeam_cprocess
+import advanced_sunbeam_openstack.charm as sunbeam_charm
+import advanced_sunbeam_openstack.core as sunbeam_core
+import advanced_sunbeam_openstack.container_handlers as sunbeam_chandlers
+import advanced_sunbeam_openstack.relation_handlers as sunbeam_rhandlers
+import advanced_sunbeam_openstack.config_contexts as sunbeam_ctxts
+
+from charms.observability_libs.v0.kubernetes_service_patch \
+    import KubernetesServicePatch
+
+logger = logging.getLogger(__name__)
+
+NOVA_SCHEDULER_CONTAINER = "nova-scheduler"
+NOVA_CONDUCTOR_CONTAINER = "nova-conductor"
+
+
+class WSGINovaAPIConfigContext(sunbeam_ctxts.ConfigContext):
+    """Configuration context for WSGI configuration."""
+
+    def context(self) -> dict:
+        """WSGI configuration options."""
+        log_svc_name = self.charm.service_name.replace('-', '_')
+        return {
+            "name": self.charm.service_name,
+            "public_port": 8775,
+            "user": self.charm.service_user,
+            "group": self.charm.service_group,
+            "wsgi_admin_script": '/usr/bin/nova-metadata-wsgi',
+            "wsgi_public_script": '/usr/bin/nova-metadata-wsgi',
+            "error_log": f"/var/log/apache2/{log_svc_name}_error.log",
+            "custom_log": f"/var/log/apache2/{log_svc_name}_access.log",
+        }
+
+
+class NovaSchedulerPebbleHandler(sunbeam_chandlers.ServicePebbleHandler):
+
+    def get_layer(self):
+        """Nova Scheduler service.
+
+        :returns: pebble layer configuration for scheduler service
+        :rtype: dict
+        """
+        return {
+            "summary": "nova scheduler layer",
+            "description": "pebble configuration for nova services",
+            "services": {
+                "nova-scheduler": {
+                    "override": "replace",
+                    "summary": "Nova Scheduler",
+                    "command": "nova-scheduler",
+                    "startup": "enabled"
+                }
+            }
+        }
+
+    def default_container_configs(self):
+        return [
+            sunbeam_core.ContainerConfigFile(
+                [self.container_name],
+                '/etc/nova/nova.conf',
+                'nova',
+                'nova')]
+
+
+class NovaConductorPebbleHandler(sunbeam_chandlers.ServicePebbleHandler):
+
+    def get_layer(self):
+        """Nova Conductor service.
+
+        :returns: pebble layer configuration for conductor service
+        :rtype: dict
+        """
+        return {
+            "summary": "nova conductor layer",
+            "description": "pebble configuration for nova services",
+            "services": {
+                "nova-conductor": {
+                    "override": "replace",
+                    "summary": "Nova Conductor",
+                    "command": "nova-conductor",
+                    "startup": "enabled"
+                }
+            }
+        }
+
+    def default_container_configs(self):
+        return [
+            sunbeam_core.ContainerConfigFile(
+                [self.container_name],
+                '/etc/nova/nova.conf',
+                'nova',
+                'nova')]
+
+
+class NovaOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
+    """Charm the service."""
+
+    _state = StoredState()
+    service_name = "nova-api"
+    wsgi_admin_script = '/usr/bin/nova-api-wsgi'
+    wsgi_public_script = '/usr/bin/nova-api-wsgi'
+
+    db_sync_cmds = [
+        ['sudo', '-u', 'nova', 'nova-manage', 'api_db', 'sync'],
+        ['sudo', '-u', 'nova', 'nova-manage', 'cell_v2', 'map_cell0'],
+        ['sudo', '-u', 'nova', 'nova-manage', 'db', 'sync']]
+
+    def __init__(self, framework):
+        super().__init__(framework)
+        self.service_patcher = KubernetesServicePatch(
+            self,
+            [
+                ('public', self.default_public_ingress_port),
+            ]
+        )
+
+    @property
+    def service_conf(self) -> str:
+        """Service default configuration file."""
+        return "/etc/nova/nova.conf"
+
+    @property
+    def service_user(self) -> str:
+        """Service user file and directory ownership."""
+        
return 'nova' + + @property + def service_group(self) -> str: + """Service group file and directory ownership.""" + return 'nova' + + @property + def service_endpoints(self): + return [ + { + 'service_name': 'nova', + 'type': 'compute', + 'description': "OpenStack Compute", + 'internal_url': f'{self.internal_url}/v2.1', + 'public_url': f'{self.public_url}/v2.1', + 'admin_url': f'{self.admin_url}/v2.1'}] + + @property + def default_public_ingress_port(self): + return 8774 + + @property + def databases(self) -> List[str]: + """Databases needed to support this charm. + + Need to override the default to specify three dbs. + """ + return ["nova_api", "nova", "nova_cell0"] + + def get_pebble_handlers(self): + pebble_handlers = super().get_pebble_handlers() + pebble_handlers.extend([ + NovaSchedulerPebbleHandler( + self, + NOVA_SCHEDULER_CONTAINER, + 'nova-scheduler', + [], + self.template_dir, + self.openstack_release, + self.configure_charm), + NovaConductorPebbleHandler( + self, + NOVA_CONDUCTOR_CONTAINER, + 'nova-conductor', + [], + self.template_dir, + self.openstack_release, + self.configure_charm)]) + return pebble_handlers + + @property + def config_contexts(self) -> List[sunbeam_ctxts.ConfigContext]: + """Generate list of configuration adapters for the charm.""" + _cadapters = super().config_contexts + _cadapters.extend( + [ + WSGINovaAPIConfigContext( + self, 'wsgi_nova_api', + ) + ] + ) + return _cadapters + +class NovaWallabyOperatorCharm(NovaOperatorCharm): + + openstack_release = 'wallaby' + +if __name__ == "__main__": + # Note: use_juju_for_storage=True required per + # https://github.com/canonical/operator/issues/506 + main(NovaWallabyOperatorCharm, use_juju_for_storage=True) + diff --git a/charms/nova-k8s/src/templates/ceph.conf.j2 b/charms/nova-k8s/src/templates/ceph.conf.j2 new file mode 100644 index 00000000..c293ae90 --- /dev/null +++ b/charms/nova-k8s/src/templates/ceph.conf.j2 @@ -0,0 +1,22 @@ +############################################################################### +# [ WARNING ] +# ceph configuration file maintained in aso +# local changes may be overwritten. +############################################################################### +[global] +{% if ceph.auth -%} +auth_supported = {{ ceph.auth }} +mon host = {{ ceph.mon_hosts }} +{% endif -%} +keyring = /etc/ceph/$cluster.$name.keyring +log to syslog = false +err to syslog = false +clog to syslog = false +{% if ceph.rbd_features %} +rbd default features = {{ ceph.rbd_features }} +{% endif %} + +[client] +{% if ceph_config.rbd_default_data_pool -%} +rbd default data pool = {{ ceph_config.rbd_default_data_pool }} +{% endif %} diff --git a/charms/nova-k8s/src/templates/nova.conf.j2 b/charms/nova-k8s/src/templates/nova.conf.j2 new file mode 100644 index 00000000..9143a200 --- /dev/null +++ b/charms/nova-k8s/src/templates/nova.conf.j2 @@ -0,0 +1,5409 @@ +[DEFAULT] +log_dir = /var/log/nova +lock_path = /var/lock/nova +state_path = /var/lib/nova + +# +# From nova.conf +# + +# +# Availability zone for internal services. For more information, refer to the +# documentation. (string value) +#internal_service_availability_zone = internal + +# +# Default availability zone for compute services. For more information, refer to +# the documentation. (string value) +#default_availability_zone = nova + +# +# Default availability zone for instances. For more information, refer to the +# documentation. 
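The pebble handlers in charm.py above return plain dictionaries from get_layer(). For orientation, here is a minimal sketch (not part of this commit) of how such a layer is typically applied to a workload container with the ops Pebble API; whether ServicePebbleHandler does exactly this is not shown in this diff.

```python
# Minimal sketch, assuming an ops.model.Container for the "nova-scheduler"
# workload; the layer dict is the one returned by get_layer() above.
from ops.model import Container


def start_nova_scheduler(container: Container) -> None:
    layer = {
        "summary": "nova scheduler layer",
        "description": "pebble configuration for nova services",
        "services": {
            "nova-scheduler": {
                "override": "replace",
                "summary": "Nova Scheduler",
                "command": "nova-scheduler",
                "startup": "enabled",
            }
        },
    }
    container.add_layer("nova-scheduler", layer, combine=True)
    container.autostart()  # starts every service marked "startup: enabled"
```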
(string value) +#default_schedule_zone = + +# Length of generated instance admin passwords (integer value) +# Minimum value: 0 +#password_length = 12 + +# +# Time period to generate instance usages for. It is possible to define optional +# offset to given period by appending @ character followed by a number defining +# offset. For more information, refer to the documentation. (string value) +#instance_usage_audit_period = month + +# +# Start and use a daemon that can run the commands that need to be run with +# root privileges. This option is usually enabled on nodes that run nova compute +# processes. +# (boolean value) +#use_rootwrap_daemon = false + +# +# Path to the rootwrap configuration file. For more information, refer to the +# documentation. (string value) +#rootwrap_config = /etc/nova/rootwrap.conf + +# Explicitly specify the temporary working directory (string value) +#tempdir = + +# +# Defines which driver to use for controlling virtualization. For more +# information, refer to the documentation. (string value) +#compute_driver = + +# +# Allow destination machine to match source for resize. Useful when +# testing in single-host environments. By default it is not allowed +# to resize to the same host. Setting this option to true will add +# the same host to the destination options. Also set to true +# if you allow the ServerGroupAffinityFilter and need to resize. +# (boolean value) +#allow_resize_to_same_host = false + +# +# Image properties that should not be inherited from the instance +# when taking a snapshot. For more information, refer to the documentation. +# (list value) +#non_inheritable_image_properties = cache_in_nova,bittorrent + +# +# Maximum number of devices that will result in a local image being +# created on the hypervisor node. For more information, refer to the +# documentation. (integer value) +#max_local_block_devices = 3 + +# +# A comma-separated list of monitors that can be used for getting +# compute metrics. You can use the alias/name from the setuptools +# entry points for nova.compute.monitors.* namespaces. If no +# namespace is supplied, the "cpu." namespace is assumed for +# backwards-compatibility. For more information, refer to the documentation. +# (list value) +#compute_monitors = + +# +# The default format an ephemeral_volume will be formatted with on creation. For +# more information, refer to the documentation. (string value) +#default_ephemeral_format = + +# +# Determine if instance should boot or fail on VIF plugging timeout. For more +# information, refer to the documentation. (boolean value) +#vif_plugging_is_fatal = true + +# +# Timeout for Neutron VIF plugging event message arrival. For more information, +# refer to the documentation. (integer value) +# Minimum value: 0 +#vif_plugging_timeout = 300 + +# +# Timeout for Accelerator Request (ARQ) bind event message arrival. For more +# information, refer to the documentation. (integer value) +# Minimum value: 1 +#arq_binding_timeout = 300 + +# Path to '/etc/network/interfaces' template. For more information, refer to the +# documentation. (string value) +#injected_network_template = $pybasedir/nova/virt/interfaces.template + +# +# The image preallocation mode to use. For more information, refer to the +# documentation. (string value) +# Possible values: +# none - No storage provisioning is done up front +# space - Storage is fully allocated at instance start +#preallocate_images = none + +# +# Enable use of copy-on-write (cow) images. For more information, refer to the +# documentation. 
(boolean value) +#use_cow_images = true + +# +# Force conversion of backing images to raw format. For more information, refer +# to the documentation. (boolean value) +#force_raw_images = true + +# +# Name of the mkfs commands for ephemeral device. For more information, refer to +# the documentation. (multi valued) +#virt_mkfs = + +# +# Enable resizing of filesystems via a block device. For more information, refer +# to the documentation. (boolean value) +#resize_fs_using_block_device = false + +# Amount of time, in seconds, to wait for NBD device start up (integer value) +# Minimum value: 0 +#timeout_nbd = 10 + +# +# Generic property to specify the pointer type. For more information, refer to +# the documentation. (string value) +# Possible values: +# ps2mouse - Uses relative movement. Mouse connected by PS2 +# usbtablet - Uses absolute movement. Tablet connect by USB +# - Uses default behavior provided by drivers (mouse on PS2 for libvirt +# x86) +#pointer_model = usbtablet + +# DEPRECATED: +# Mask of host CPUs that can be used for ``VCPU`` resources. For more +# information, refer to the documentation. (string value) +# This option is deprecated for removal since 20.0.0. +# Its value may be silently ignored in the future. +# Reason: +# This option has been superseded by the ``[compute] cpu_dedicated_set`` and +# ``[compute] cpu_shared_set`` options, which allow things like the co-existence +# of pinned and unpinned instances on the same host (for the libvirt driver). +#vcpu_pin_set = + +# +# Number of huge/large memory pages to reserved per NUMA host cell. For more +# information, refer to the documentation. (dict value) +#reserved_huge_pages = + +# +# Amount of disk resources in MB to make them always available to host. The +# disk usage gets reported back to the scheduler from nova-compute running +# on the compute nodes. To prevent the disk resources from being considered +# as available, this option can be used to reserve disk space for that host. For +# more information, refer to the documentation. (integer value) +# Minimum value: 0 +#reserved_host_disk_mb = 0 + +# +# Amount of memory in MB to reserve for the host so that it is always available +# to host processes. The host resources usage is reported back to the scheduler +# continuously from nova-compute running on the compute node. To prevent the +# host +# memory from being considered as available, this option is used to reserve +# memory for the host. For more information, refer to the documentation. +# (integer value) +# Minimum value: 0 +#reserved_host_memory_mb = 512 + +# +# Number of host CPUs to reserve for host processes. For more information, refer +# to the documentation. (integer value) +# Minimum value: 0 +#reserved_host_cpus = 0 + +# +# Virtual CPU to physical CPU allocation ratio. For more information, refer to +# the documentation. (floating point value) +# Minimum value: 0.0 +#cpu_allocation_ratio = + +# +# Virtual RAM to physical RAM allocation ratio. For more information, refer to +# the documentation. (floating point value) +# Minimum value: 0.0 +#ram_allocation_ratio = + +# +# Virtual disk to physical disk allocation ratio. For more information, refer to +# the documentation. (floating point value) +# Minimum value: 0.0 +#disk_allocation_ratio = + +# +# Initial virtual CPU to physical CPU allocation ratio. For more information, +# refer to the documentation. (floating point value) +# Minimum value: 0.0 +#initial_cpu_allocation_ratio = 16.0 + +# +# Initial virtual RAM to physical RAM allocation ratio. 
For more information, +# refer to the documentation. (floating point value) +# Minimum value: 0.0 +#initial_ram_allocation_ratio = 1.5 + +# +# Initial virtual disk to physical disk allocation ratio. For more information, +# refer to the documentation. (floating point value) +# Minimum value: 0.0 +#initial_disk_allocation_ratio = 1.0 + +# +# Console proxy host to be used to connect to instances on this host. It is the +# publicly visible name for the console host. For more information, refer to the +# documentation. (string value) +# +# This option has a sample default set, which means that +# its actual default value may vary from the one documented +# below. +#console_host = + +# +# Name of the network to be used to set access IPs for instances. If there are +# multiple IPs to choose from, an arbitrary one will be chosen. For more +# information, refer to the documentation. (string value) +#default_access_ip_network_name = + +# +# Specifies where instances are stored on the hypervisor's disk. +# It can point to locally attached storage or a directory on NFS. For more +# information, refer to the documentation. (string value) +# +# This option has a sample default set, which means that +# its actual default value may vary from the one documented +# below. +#instances_path = $state_path/instances + +# +# This option enables periodic compute.instance.exists notifications. Each +# compute node must be configured to generate system usage data. These +# notifications are consumed by OpenStack Telemetry service. +# (boolean value) +#instance_usage_audit = false + +# +# Maximum number of 1 second retries in live_migration. It specifies number +# of retries to iptables when it complains. It happens when an user continuously +# sends live-migration request to same host leading to concurrent request +# to iptables. For more information, refer to the documentation. (integer value) +# Minimum value: 0 +#live_migration_retry_count = 30 + +# +# This option specifies whether to start guests that were running before the +# host rebooted. It ensures that all of the instances on a Nova compute node +# resume their state each time the compute node boots or restarts. +# (boolean value) +#resume_guests_state_on_host_boot = false + +# +# Number of times to retry network allocation. It is required to attempt network +# allocation retries if the virtual interface plug fails. For more information, +# refer to the documentation. (integer value) +# Minimum value: 0 +#network_allocate_retries = 0 + +# +# Limits the maximum number of instance builds to run concurrently by +# nova-compute. Compute service can attempt to build an infinite number of +# instances, if asked to do so. This limit is enforced to avoid building +# unlimited instance concurrently on a compute node. This value can be set +# per compute node. For more information, refer to the documentation. (integer +# value) +# Minimum value: 0 +#max_concurrent_builds = 10 + +# +# Maximum number of instance snapshot operations to run concurrently. +# This limit is enforced to prevent snapshots overwhelming the +# host/network/storage and causing failure. This value can be set per +# compute node. For more information, refer to the documentation. (integer +# value) +# Minimum value: 0 +#max_concurrent_snapshots = 5 + +# +# Maximum number of live migrations to run concurrently. This limit is enforced +# to avoid outbound live migrations overwhelming the host/network and causing +# failures. 
It is not recommended that you change this unless you are very sure +# that doing so is safe and stable in your environment. For more information, +# refer to the documentation. (integer value) +# Minimum value: 0 +#max_concurrent_live_migrations = 1 + +# +# The number of times to check for a volume to be "available" before attaching +# it during server create. For more information, refer to the documentation. +# (integer value) +# Minimum value: 0 +#block_device_allocate_retries = 60 + +# +# Number of greenthreads available for use to sync power states. For more +# information, refer to the documentation. (integer value) +#sync_power_state_pool_size = 1000 + +# +# Interval to sync power states between the database and the hypervisor. For +# more information, refer to the documentation. (integer value) +#sync_power_state_interval = 600 + +# +# Interval between instance network information cache updates. For more +# information, refer to the documentation. (integer value) +#heal_instance_info_cache_interval = 60 + +# +# Interval for reclaiming deleted instances. For more information, refer to the +# documentation. (integer value) +#reclaim_instance_interval = 0 + +# +# Interval for gathering volume usages. For more information, refer to the +# documentation. (integer value) +#volume_usage_poll_interval = 0 + +# +# Interval for polling shelved instances to offload. For more information, refer +# to the documentation. (integer value) +#shelved_poll_interval = 3600 + +# +# Time before a shelved instance is eligible for removal from a host. For more +# information, refer to the documentation. (integer value) +#shelved_offload_time = 0 + +# +# Interval for retrying failed instance file deletes. For more information, +# refer to the documentation. (integer value) +#instance_delete_interval = 300 + +# +# Interval (in seconds) between block device allocation retries on failures. For +# more information, refer to the documentation. (integer value) +# Minimum value: 0 +#block_device_allocate_retries_interval = 3 + +# +# Interval between sending the scheduler a list of current instance UUIDs to +# verify that its view of instances is in sync with nova. For more information, +# refer to the documentation. (integer value) +#scheduler_instance_sync_interval = 120 + +# +# Interval for updating compute resources. For more information, refer to the +# documentation. (integer value) +#update_resources_interval = 0 + +# +# Time interval after which an instance is hard rebooted automatically. For more +# information, refer to the documentation. (integer value) +# Minimum value: 0 +#reboot_timeout = 0 + +# +# Maximum time in seconds that an instance can take to build. For more +# information, refer to the documentation. (integer value) +# Minimum value: 0 +#instance_build_timeout = 0 + +# +# Interval to wait before un-rescuing an instance stuck in RESCUE. For more +# information, refer to the documentation. (integer value) +# Minimum value: 0 +#rescue_timeout = 0 + +# +# Automatically confirm resizes after N seconds. For more information, refer to +# the documentation. (integer value) +# Minimum value: 0 +#resize_confirm_window = 0 + +# +# Total time to wait in seconds for an instance to perform a clean +# shutdown. For more information, refer to the documentation. (integer value) +# Minimum value: 0 +#shutdown_timeout = 60 + +# +# The compute service periodically checks for instances that have been +# deleted in the database but remain running on the compute node. 
The +# above option enables action to be taken when such instances are +# identified. For more information, refer to the documentation. (string value) +# Possible values: +# reap - Powers down the instances and deletes them +# log - Logs warning message about deletion of the resource +# shutdown - Powers down instances and marks them as non-bootable which can be +# later used for debugging/analysis +# noop - Takes no action +#running_deleted_instance_action = reap + +# +# Time interval in seconds to wait between runs for the clean up action. +# If set to 0, above check will be disabled. If "running_deleted_instance +# _action" is set to "log" or "reap", a value greater than 0 must be set. For +# more information, refer to the documentation. (integer value) +#running_deleted_instance_poll_interval = 1800 + +# +# Time interval in seconds to wait for the instances that have +# been marked as deleted in database to be eligible for cleanup. For more +# information, refer to the documentation. (integer value) +#running_deleted_instance_timeout = 0 + +# +# The number of times to attempt to reap an instance's files. For more +# information, refer to the documentation. (integer value) +# Minimum value: 1 +#maximum_instance_delete_attempts = 5 + +# +# Sets the scope of the check for unique instance names. For more information, +# refer to the documentation. (string value) +# Possible values: +# '' - An empty value means that no uniqueness check is done and duplicate names +# are possible +# project - The instance name check is done only for instances within the same +# project +# global - The instance name check is done for all instances regardless of the +# project +#osapi_compute_unique_server_name_scope = + +# +# Enable new nova-compute services on this host automatically. For more +# information, refer to the documentation. (boolean value) +#enable_new_services = true + +# +# Template string to be used to generate instance names. For more information, +# refer to the documentation. (string value) +#instance_name_template = instance-%08x + +# +# Number of times to retry live-migration before failing. For more information, +# refer to the documentation. (integer value) +# Minimum value: -1 +#migrate_max_retries = -1 + +# DEPRECATED: +# Config drive format. For more information, refer to the documentation. (string +# value) +# Possible values: +# iso9660 - A file system image standard that is widely supported across +# operating systems. +# vfat - Provided for legacy reasons and to enable live migration with the +# libvirt driver and non-shared storage +# This option is deprecated for removal since 19.0.0. +# Its value may be silently ignored in the future. +# Reason: +# This option was originally added as a workaround for bug in libvirt, #1246201, +# that was resolved in libvirt v1.2.17. As a result, this option is no longer +# necessary or useful. +#config_drive_format = iso9660 + +# +# Force injection to take place on a config drive. For more information, refer +# to the documentation. (boolean value) +#force_config_drive = false + +# +# Name or path of the tool used for ISO image creation. For more information, +# refer to the documentation. (string value) +#mkisofs_cmd = genisoimage + +# +# The IP address which the host is using to connect to the management network. +# For more information, refer to the documentation. (string value) +# +# This option has a sample default set, which means that +# its actual default value may vary from the one documented +# below. 
+#my_ip = + +# +# The IP address which is used to connect to the block storage network. For more +# information, refer to the documentation. (string value) +#my_block_storage_ip = $my_ip + +# +# Hostname, FQDN or IP address of this host. For more information, refer to the +# documentation. (string value) +# +# This option has a sample default set, which means that +# its actual default value may vary from the one documented +# below. +#host = + +# +# This option determines whether the network setup information is injected into +# the VM before it is booted. While it was originally designed to be used only +# by nova-network, it is also used by the vmware virt driver to control whether +# network information is injected into a VM. The libvirt virt driver also uses +# it +# when we use config_drive to configure network to control whether network +# information is injected into a VM. +# (boolean value) +#flat_injected = false + +# +# Filename that will be used for storing websocket frames received +# and sent by a proxy service (like VNC, spice, serial) running on this host. +# If this is not set, no recording will be done. +# (string value) +#record = + +# Run as a background process (boolean value) +#daemon = false + +# +# Disallow non-encrypted connections. For more information, refer to the +# documentation. (boolean value) +#ssl_only = false + +# Set to True if source host is addressed with IPv6 (boolean value) +#source_is_ipv6 = false + +# +# Path to SSL certificate file. For more information, refer to the +# documentation. (string value) +#cert = self.pem + +# +# SSL key file (if separate from cert). For more information, refer to the +# documentation. (string value) +#key = + +# +# Path to directory with content which will be served by a web server. +# (string value) +#web = /usr/share/spice-html5 + +# +# The directory where the Nova python modules are installed. For more +# information, refer to the documentation. (string value) +# +# This option has a sample default set, which means that +# its actual default value may vary from the one documented +# below. +#pybasedir = + +# +# The top-level directory for maintaining Nova's state. For more information, +# refer to the documentation. (string value) +#state_path = $pybasedir + +# +# This option allows setting an alternate timeout value for RPC calls +# that have the potential to take a long time. If set, RPC calls to +# other services will use this value for the timeout (in seconds) +# instead of the global rpc_response_timeout value. For more information, refer +# to the documentation. (integer value) +#long_rpc_timeout = 1800 + +# +# Number of seconds indicating how frequently the state of services on a +# given hypervisor is reported. Nova needs to know this to determine the +# overall health of the deployment. For more information, refer to the +# documentation. (integer value) +#report_interval = 10 + +# +# Maximum time in seconds since last check-in for up service. For more +# information, refer to the documentation. (integer value) +#service_down_time = 60 + +# +# Enable periodic tasks. For more information, refer to the documentation. +# (boolean value) +#periodic_enable = true + +# +# Number of seconds to randomly delay when starting the periodic task +# scheduler to reduce stampeding. For more information, refer to the +# documentation. 
(integer value) +# Minimum value: 0 +#periodic_fuzzy_delay = 60 + +# List of APIs to be enabled by default (list value) +#enabled_apis = osapi_compute,metadata + +# +# List of APIs with enabled SSL. For more information, refer to the +# documentation. (list value) +#enabled_ssl_apis = + +# +# IP address on which the OpenStack API will listen. For more information, refer +# to the documentation. (string value) +#osapi_compute_listen = 0.0.0.0 + +# +# Port on which the OpenStack API will listen. For more information, refer to +# the documentation. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#osapi_compute_listen_port = 8774 + +# +# Number of workers for OpenStack API service. The default will be the number +# of CPUs available. For more information, refer to the documentation. (integer +# value) +# Minimum value: 1 +#osapi_compute_workers = + +# +# IP address on which the metadata API will listen. For more information, refer +# to the documentation. (string value) +#metadata_listen = 0.0.0.0 + +# +# Port on which the metadata API will listen. For more information, refer to the +# documentation. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#metadata_listen_port = 8775 + +# +# Number of workers for metadata service. If not specified the number of +# available CPUs will be used. For more information, refer to the documentation. +# (integer value) +# Minimum value: 1 +#metadata_workers = + +# +# This option specifies the driver to be used for the servicegroup service. For +# more information, refer to the documentation. (string value) +# Possible values: +# db - Database ServiceGroup driver +# mc - Memcache ServiceGroup driver +#servicegroup_driver = db + +# +# From oslo.log +# + +# If set to true, the logging level will be set to DEBUG instead of the default +# INFO level (boolean value) +# Note: This option can be changed without restarting. +#debug = false + +# The name of a logging configuration file. This file is appended to any +# existing logging configuration files. For details about logging configuration +# files, see the Python logging module documentation. Note that when logging +# configuration files are used then all logging configuration is set in the +# configuration file and other logging configuration options are ignored (for +# example, log-date-format) (string value) +# Note: This option can be changed without restarting. +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append = + +# Defines the format string for %%(asctime)s in log records. Default: +# %(default)s . This option is ignored if log_config_append is set (string +# value) +#log_date_format = %Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to send logging output to. If no default is set, +# logging will go to stderr as defined by use_stderr. This option is ignored if +# log_config_append is set (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file = + +# (Optional) The base directory used for relative log_file paths. This option +# is ignored if log_config_append is set (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir = + +# Uses logging handler designed to watch file system. When log file is moved or +# removed this handler will open a new log file with specified path +# instantaneously. It makes sense only if log_file option is specified and Linux +# platform is used. This option is ignored if log_config_append is set (boolean +# value) +#watch_log_file = false + +# Use syslog for logging. 
Existing syslog format is DEPRECATED and will be +# changed later to honor RFC5424. This option is ignored if log_config_append is +# set (boolean value) +#use_syslog = false + +# Enable journald for logging. If running in a systemd environment you may wish +# to enable journal support. Doing so will use the journal native protocol which +# includes structured metadata in addition to log messages.This option is +# ignored if log_config_append is set (boolean value) +#use_journal = false + +# Syslog facility to receive log lines. This option is ignored if +# log_config_append is set (string value) +#syslog_log_facility = LOG_USER + +# Use JSON formatting for logging. This option is ignored if log_config_append +# is set (boolean value) +#use_json = false + +# Log output to standard error. This option is ignored if log_config_append is +# set (boolean value) +#use_stderr = false + +# Log output to Windows Event Log (boolean value) +#use_eventlog = false + +# The amount of time before the log files are rotated. This option is ignored +# unless log_rotation_type is setto "interval" (integer value) +#log_rotate_interval = 1 + +# Rotation interval type. The time of the last file change (or the time when the +# service was started) is used when scheduling the next rotation (string value) +# Possible values: +# Seconds - +# Minutes - +# Hours - +# Days - +# Weekday - +# Midnight - +#log_rotate_interval_type = days + +# Maximum number of rotated log files (integer value) +#max_logfile_count = 30 + +# Log file maximum size in MB. This option is ignored if "log_rotation_type" is +# not set to "size" (integer value) +#max_logfile_size_mb = 200 + +# Log rotation type (string value) +# Possible values: +# interval - Rotate logs at predefined time intervals. +# size - Rotate logs once they reach a predefined size. +# none - Do not rotate log files. +#log_rotation_type = none + +# Format string to use for log messages with context. Used by +# oslo_log.formatters.ContextFormatter (string value) +#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages when context is undefined. Used by +# oslo_log.formatters.ContextFormatter (string value) +#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Additional data to append to log message when logging level for the message is +# DEBUG. Used by oslo_log.formatters.ContextFormatter (string value) +#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. Used by +# oslo_log.formatters.ContextFormatter (string value) +#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + +# Defines the format string for %(user_identity)s that is used in +# logging_context_format_string. Used by oslo_log.formatters.ContextFormatter +# (string value) +#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s + +# List of package logging levels in logger=LEVEL pairs. 
This option is ignored +# if log_config_append is set (list value) +#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO,glanceclient=WARN,oslo.privsep.daemon=INFO + +# Enables or disables publication of error events (boolean value) +#publish_errors = false + +# The format for an instance that is passed with the log message (string value) +#instance_format = "[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message (string +# value) +#instance_uuid_format = "[instance: %(uuid)s] " + +# Interval, number of seconds, of log rate limiting (integer value) +#rate_limit_interval = 0 + +# Maximum number of logged messages per rate_limit_interval (integer value) +#rate_limit_burst = 0 + +# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG or +# empty string. Logs with level greater or equal to rate_limit_except_level are +# not filtered. An empty string means that all levels are filtered (string +# value) +#rate_limit_except_level = CRITICAL + +# Enables or disables fatal status of deprecations (boolean value) +#fatal_deprecations = false + +# +# From oslo.messaging +# + +# Size of RPC connection pool (integer value) +# Minimum value: 1 +#rpc_conn_pool_size = 30 + +# The pool size limit for connections expiration policy (integer value) +#conn_pool_min_size = 2 + +# The time-to-live in sec of idle connections in the pool (integer value) +#conn_pool_ttl = 1200 + +# Size of executor thread pool when executor is threading or eventlet (integer +# value) +# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size +#executor_thread_pool_size = 64 + +# Seconds to wait for a response from a call (integer value) +#rpc_response_timeout = 60 + +# The network address and optional user credentials for connecting to the +# messaging backend, in URL format. The expected format is. For more +# information, refer to the documentation. (string value) +transport_url = {{ amqp.transport_url }} + +# The default exchange under which topics are scoped. May be overridden by an +# exchange name specified in the transport_url option (string value) +#control_exchange = nova + +# Add an endpoint to answer to ping calls. Endpoint is named +# oslo_rpc_server_ping (boolean value) +#rpc_ping_enabled = false + +# +# From oslo.service.periodic_task +# + +# Some periodic tasks can be run in a separate process. Should we run them here? +# (boolean value) +#run_external_periodic_tasks = true + +# +# From oslo.service.service +# + +# Enable eventlet backdoor. Acceptable values are 0, , and :, +# where 0 results in listening on a random tcp port number; results in +# listening on the specified port number (and not enabling backdoor if that port +# is in use); and : results in listening on the smallest unused port +# number within the specified range of port numbers. The chosen port is +# displayed in the service's log file (string value) +#backdoor_port = + +# Enable eventlet backdoor, using the provided path as a unix socket that can +# receive connections. This option is mutually exclusive with 'backdoor_port' in +# that only one should be provided. 
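transport_url above is the only templated value in this [DEFAULT] section. As a rough illustration (not part of this commit) of the oslo.messaging URL shape it carries, built from the kind of data the AMQP interface library exposes (username, password, vhost and per-unit hostnames); the exact assembly in advanced_sunbeam_openstack may differ:

```python
# Illustrative helper only: compose a RabbitMQ transport_url of the form
# rabbit://user:password@host:port,.../vhost from per-unit hostnames.
def build_transport_url(username, password, hostnames, vhost, port=5672):
    hosts = ",".join(f"{username}:{password}@{host}:{port}" for host in hostnames)
    return f"rabbit://{hosts}/{vhost}"


# rabbit://nova:s3cret@10.1.0.5:5672,nova:s3cret@10.1.0.6:5672/openstack
print(build_transport_url("nova", "s3cret", ["10.1.0.5", "10.1.0.6"], "openstack"))
```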
If both are provided then the existence of +# this option overrides the usage of that option. Inside the path {pid} will be +# replaced with the PID of the current process (string value) +#backdoor_socket = + +# Enables or disables logging values of all registered options when starting a +# service (at DEBUG level) (boolean value) +#log_options = true + +# Specify a timeout after which a gracefully shutdown server will exit. Zero +# value means endless wait (integer value) +#graceful_shutdown_timeout = 60 + + +[api] +# +# Options under this group are used to define Nova API. + +# +# From nova.conf +# + +# DEPRECATED: +# Determine the strategy to use for authentication. +# (string value) +# Possible values: +# keystone - Use keystone for authentication. +# noauth2 - Designed for testing only, as it does no actual credential checking. +# 'noauth2' provides administrative credentials only if 'admin' is specified as +# the username. +# This option is deprecated for removal since 21.0.0. +# Its value may be silently ignored in the future. +# Reason: +# The only non-default choice, ``noauth2``, is for internal development and +# testing purposes only and should not be used in deployments. This option and +# its middleware, NoAuthMiddleware[V2_18], will be removed in a future release. +#auth_strategy = keystone + +# +# When True, the 'X-Forwarded-For' header is treated as the canonical remote +# address. When False (the default), the 'remote_address' header is used. For +# more information, refer to the documentation. (boolean value) +#use_forwarded_for = false + +# +# When gathering the existing metadata for a config drive, the EC2-style +# metadata is returned for all versions that don't appear in this option. +# As of the Liberty release, the available versions are. For more information, +# refer to the documentation. (string value) +#config_drive_skip_versions = 1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01 + +# +# A list of vendordata providers. For more information, refer to the +# documentation. (list value) +#vendordata_providers = StaticJSON + +# +# A list of targets for the dynamic vendordata provider. These targets are of +# the form ``@``. For more information, refer to the documentation. +# (list value) +#vendordata_dynamic_targets = + +# +# Path to an optional certificate file or CA bundle to verify dynamic +# vendordata REST services ssl certificates against. For more information, refer +# to the documentation. (string value) +#vendordata_dynamic_ssl_certfile = + +# +# Maximum wait time for an external REST service to connect. For more +# information, refer to the documentation. (integer value) +# Minimum value: 3 +#vendordata_dynamic_connect_timeout = 5 + +# +# Maximum wait time for an external REST service to return data once connected. +# For more information, refer to the documentation. (integer value) +# Minimum value: 0 +#vendordata_dynamic_read_timeout = 5 + +# +# Should failures to fetch dynamic vendordata be fatal to instance boot?. For +# more information, refer to the documentation. (boolean value) +#vendordata_dynamic_failure_fatal = false + +# +# This option is the time (in seconds) to cache metadata. When set to 0, +# metadata caching is disabled entirely; this is generally not recommended for +# performance reasons. Increasing this setting should improve response times +# of the metadata API when under heavy load. Higher values may increase memory +# usage, and result in longer times for host metadata changes to take effect. 
+# (integer value) +# Minimum value: 0 +#metadata_cache_expiration = 15 + +# +# Indicates that the nova-metadata API service has been deployed per-cell, so +# that we can have better performance and data isolation in a multi-cell +# deployment. Users should consider the use of this configuration depending on +# how neutron is setup. If you have networks that span cells, you might need to +# run nova-metadata API service globally. If your networks are segmented along +# cell boundaries, then you can run nova-metadata API service per cell. When +# running nova-metadata API service per cell, you should also configure each +# Neutron metadata-agent to point to the corresponding nova-metadata API +# service. +# (boolean value) +#local_metadata_per_cell = false + +# +# Domain name used to configure FQDN for instances. For more information, refer +# to the documentation. (string value) +#dhcp_domain = novalocal + +# +# Cloud providers may store custom data in vendor data file that will then be +# available to the instances via the metadata service, and to the rendering of +# config-drive. The default class for this, JsonFileVendorData, loads this +# information from a JSON file, whose path is configured by this option. If +# there is no path set by this option, the class returns an empty dictionary. +# For more information, refer to the documentation. (string value) +#vendordata_jsonfile_path = + +# +# As a query can potentially return many thousands of items, you can limit the +# maximum number of items in a single response by setting this option. +# (integer value) +# Minimum value: 0 +# Deprecated group/name - [DEFAULT]/osapi_max_limit +#max_limit = 1000 + +# +# This string is prepended to the normal URL that is returned in links to the +# OpenStack Compute API. If it is empty (the default), the URLs are returned +# unchanged. For more information, refer to the documentation. (string value) +# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix +#compute_link_prefix = + +# +# This string is prepended to the normal URL that is returned in links to +# Glance resources. If it is empty (the default), the URLs are returned +# unchanged. For more information, refer to the documentation. (string value) +# Deprecated group/name - [DEFAULT]/osapi_glance_link_prefix +#glance_link_prefix = + +# +# When enabled, this will cause the API to only query cell databases +# in which the tenant has mapped instances. This requires an additional +# (fast) query in the API database before each list, but also +# (potentially) limits the number of cell databases that must be queried +# to provide the result. If you have a small number of cells, or tenants +# are likely to have instances in all cells, then this should be +# False. If you have many cells, especially if you confine tenants to a +# small subset of those cells, this should be True. +# (boolean value) +#instance_list_per_project_cells = false + +# +# This controls the method by which the API queries cell databases in +# smaller batches during large instance list operations. If batching is +# performed, a large instance list operation will request some fraction +# of the overall API limit from each cell database initially, and will +# re-request that same batch size as records are consumed (returned) +# from each cell as necessary. Larger batches mean less chattiness +# between the API and the database, but potentially more wasted effort +# processing the results from the database which will not be returned to +# the user. 
Any strategy will yield a batch size of at least 100 records, +# to avoid a user causing many tiny database queries in their request. For more +# information, refer to the documentation. (string value) +# Possible values: +# distributed - Divide the limit requested by the user by the number of cells in +# the system. This requires counting the cells in the system initially, which +# will not be refreshed until service restart or SIGHUP. The actual batch size +# will be increased by 10% over the result of ($limit / $num_cells). +# fixed - Request fixed-size batches from each cell, as defined by +# ``instance_list_cells_batch_fixed_size``. If the limit is smaller than the +# batch size, the limit will be used instead. If you do not wish batching to be +# used at all, setting the fixed size equal to the ``max_limit`` value will +# cause only one request per cell database to be issued. +#instance_list_cells_batch_strategy = distributed + +# +# This controls the batch size of instances requested from each cell +# database if ``instance_list_cells_batch_strategy``` is set to ``fixed``. +# This integral value will define the limit issued to each cell every time +# a batch of instances is requested, regardless of the number of cells in +# the system or any other factors. Per the general logic called out in +# the documentation for ``instance_list_cells_batch_strategy``, the +# minimum value for this is 100 records per batch. For more information, refer +# to the documentation. (integer value) +# Minimum value: 100 +#instance_list_cells_batch_fixed_size = 100 + +# +# When set to False, this will cause the API to return a 500 error if there is +# an +# infrastructure failure like non-responsive cells. If you want the API to skip +# the down cells and return the results from the up cells set this option to +# True. For more information, refer to the documentation. (boolean value) +#list_records_by_skipping_down_cells = true + +# +# When True, the TenantNetworkController will query the Neutron API to get the +# default networks to use. For more information, refer to the documentation. +# (boolean value) +#use_neutron_default_nets = false + +# +# Tenant ID for getting the default network from Neutron API (also referred in +# some places as the 'project ID') to use. For more information, refer to the +# documentation. (string value) +#neutron_default_tenant_id = default + +# +# Enables returning of the instance password by the relevant server API calls +# such as create, rebuild, evacuate, or rescue. If the hypervisor does not +# support password injection, then the password returned will not be correct, +# so if your hypervisor does not support password injection, set this to False. +# (boolean value) +#enable_instance_password = true + + +[api_database] +{% if shared_db.nova_api -%} +connection = {{ shared_db.nova_api.connection }} +{% else -%} +connection = sqlite:////var/lib/nova/nova_api.sqlite +{% endif -%} +connection_recycle_time = 200 + +# +# The *Nova API Database* is a separate database which is used for information +# which is used across *cells*. This database is mandatory since the Mitaka +# release (13.0.0). +# +# This group should **not** be configured for the ``nova-compute`` service. + +# +# From nova.conf +# + +# The SQLAlchemy connection string to use to connect to the database. 
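The [api_database] connection above switches on whether the shared-db relation has published a nova_api connection string. A small, self-contained sketch (not part of this commit) of how that Jinja2 block renders; the context key mirrors the template variable and the values are made up:

```python
# Self-contained illustration of the conditional used in nova.conf.j2 above.
from jinja2 import Template

snippet = Template(
    "[api_database]\n"
    "{% if shared_db.nova_api -%}\n"
    "connection = {{ shared_db.nova_api.connection }}\n"
    "{% else -%}\n"
    "connection = sqlite:////var/lib/nova/nova_api.sqlite\n"
    "{% endif -%}\n"
)

print(snippet.render(shared_db={
    "nova_api": {"connection": "mysql+pymysql://nova:s3cret@mysql/nova_api"},
}))
```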
Do not set +# this for the ``nova-compute`` service (string value) +#connection = + +# Optional URL parameters to append onto the connection URL at connect time; +# specify as param1=value1¶m2=value2& (string value) +#connection_parameters = + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous = true + +# The SQLAlchemy connection string to use to connect to the slave database +# (string value) +#slave_connection = + +# The SQL mode to be used for MySQL sessions. This option, including the +# default, overrides any server-set SQL mode. To use whatever SQL mode is set by +# the server configuration, set this to no value. Example: mysql_sql_mode= +# (string value) +#mysql_sql_mode = TRADITIONAL + +# Connections which have been present in the connection pool longer than this +# number of seconds will be replaced with a new one the next time they are +# checked out from the pool (integer value) +# Deprecated group/name - [api_database]/idle_timeout +#connection_recycle_time = 3600 + +# Maximum number of SQL connections to keep open in a pool. Setting a value of 0 +# indicates no limit (integer value) +#max_pool_size = + +# Maximum number of database connection retries during startup. Set to -1 to +# specify an infinite retry count (integer value) +#max_retries = 10 + +# Interval between retries of opening a SQL connection (integer value) +#retry_interval = 10 + +# If set, use this value for max_overflow with SQLAlchemy (integer value) +#max_overflow = + +# Verbosity of SQL debugging information: 0=None, 100=Everything (integer value) +#connection_debug = 0 + +# Add Python stack traces to SQL as comment strings (boolean value) +#connection_trace = false + +# If set, use this value for pool_timeout with SQLAlchemy (integer value) +#pool_timeout = + + +[barbican] + +# +# From nova.conf +# + +# Use this endpoint to connect to Barbican, for example: +# "http://localhost:9311/" (string value) +#barbican_endpoint = + +# Version of the Barbican API, for example: "v1" (string value) +#barbican_api_version = + +# Use this endpoint to connect to Keystone (string value) +# Deprecated group/name - [key_manager]/auth_url +#auth_endpoint = http://localhost/identity/v3 + +# Number of seconds to wait before retrying poll for key creation completion +# (integer value) +#retry_delay = 1 + +# Number of times to retry poll for key creation completion (integer value) +#number_of_retries = 60 + +# Specifies if insecure TLS (https) requests. If False, the server's certificate +# will not be validated, if True, we can set the verify_ssl_path config +# meanwhile (boolean value) +#verify_ssl = true + +# A path to a bundle or CA certs to check against, or None for requests to +# attempt to locate and use certificates which verify_ssh is True. If verify_ssl +# is False, this is ignored (string value) +#verify_ssl_path = + +# Specifies the type of endpoint. Allowed values are: public, private, and +# admin (string value) +# Possible values: +# public - +# internal - +# admin - +#barbican_endpoint_type = public + + +[cache] + +# +# From nova.conf +# + +# Prefix for building the configuration dictionary for the cache region. This +# should not need to be changed unless there is another dogpile.cache region +# with the same configuration name (string value) +#config_prefix = cache.oslo + +# Default TTL, in seconds, for any cached item in the dogpile.cache region. 
This +# applies to any cached method that doesn't have an explicit cache expiration +# time defined for it (integer value) +#expiration_time = 600 + +# Cache backend module. For eventlet-based or environments with hundreds of +# threaded servers, Memcache with pooling (oslo_cache.memcache_pool) is +# recommended. For environments with less than 100 threaded servers, Memcached +# (dogpile.cache.memcached) or Redis (dogpile.cache.redis) is recommended. Test +# environments with a single instance of the server can use the +# dogpile.cache.memory backend (string value) +# Possible values: +# oslo_cache.memcache_pool - +# oslo_cache.dict - +# oslo_cache.mongo - +# oslo_cache.etcd3gw - +# dogpile.cache.pymemcache - +# dogpile.cache.memcached - +# dogpile.cache.pylibmc - +# dogpile.cache.bmemcached - +# dogpile.cache.dbm - +# dogpile.cache.redis - +# dogpile.cache.memory - +# dogpile.cache.memory_pickle - +# dogpile.cache.null - +#backend = dogpile.cache.null + +# Arguments supplied to the backend module. Specify this option once per +# argument to be passed to the dogpile.cache backend. Example format: +# ":" (multi valued) +#backend_argument = + +# Proxy classes to import that will affect the way the dogpile.cache backend +# functions. See the dogpile.cache documentation on changing-backend-behavior +# (list value) +#proxies = + +# Global toggle for caching (boolean value) +#enabled = false + +# Extra debugging from the cache backend (cache keys, get/set/delete/etc calls). +# This is only really useful if you need to see the specific cache-backend +# get/set/delete calls with the keys/values. Typically this should be left set +# to false (boolean value) +#debug_cache_backend = false + +# Memcache servers in the format of "host:port". (dogpile.cache.memcached and +# oslo_cache.memcache_pool backends only). If a given host refer to an IPv6 or a +# given domain refer to IPv6 then you should prefix the given address with the +# address family (``inet6``) (e.g ``inet6[::1]:11211``, +# ``inet6:[fd12:3456:789a:1::1]:11211``, +# ``inet6:[controller-0.internalapi]:11211``). If the address family is not +# given then default address family used will be ``inet`` which correspond to +# IPv4 (list value) +#memcache_servers = localhost:11211 + +# Number of seconds memcached server is considered dead before it is tried +# again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only) +# (integer value) +#memcache_dead_retry = 300 + +# Timeout in seconds for every call to a server. (dogpile.cache.memcache and +# oslo_cache.memcache_pool backends only) (floating point value) +#memcache_socket_timeout = 1.0 + +# Max total number of open connections to every memcached server. +# (oslo_cache.memcache_pool backend only) (integer value) +#memcache_pool_maxsize = 10 + +# Number of seconds a connection to memcached is held unused in the pool before +# it is closed. (oslo_cache.memcache_pool backend only) (integer value) +#memcache_pool_unused_timeout = 60 + +# Number of seconds that an operation will wait to get a memcache client +# connection (integer value) +#memcache_pool_connection_get_timeout = 10 + +# Global toggle if memcache will be flushed on reconnect. +# (oslo_cache.memcache_pool backend only) (boolean value) +#memcache_pool_flush_on_reconnect = false + +# Global toggle for TLS usage when comunicating with the caching servers +# (boolean value) +#tls_enabled = false + +# Path to a file of concatenated CA certificates in PEM format necessary to +# establish the caching servers' authenticity. 
If tls_enabled is False, this +# option is ignored (string value) +#tls_cafile = + +# Path to a single file in PEM format containing the client's certificate as +# well as any number of CA certificates needed to establish the certificate's +# authenticity. This file is only required when client side authentication is +# necessary. If tls_enabled is False, this option is ignored (string value) +#tls_certfile = + +# Path to a single file containing the client's private key in. Otherwhise the +# private key will be taken from the file specified in tls_certfile. If +# tls_enabled is False, this option is ignored (string value) +#tls_keyfile = + +# Set the available ciphers for sockets created with the TLS context. It should +# be a string in the OpenSSL cipher list format. If not specified, all OpenSSL +# enabled ciphers will be available (string value) +#tls_allowed_ciphers = + + +[cinder] + +# +# From nova.conf +# + +# +# Info to match when looking for cinder in the service catalog. For more +# information, refer to the documentation. (string value) +#catalog_info = volumev3::publicURL + +# +# If this option is set then it will override service catalog lookup with +# this template for cinder endpoint. For more information, refer to the +# documentation. (string value) +#endpoint_template = + +# +# Region name of this node. This is used when picking the URL in the service +# catalog. For more information, refer to the documentation. (string value) +#os_region_name = + +# +# Number of times cinderclient should retry on any failed http call. +# 0 means connection is attempted only once. Setting it to any positive integer +# means that on failure connection is retried that many times e.g. setting it +# to 3 means total attempts to connect will be 4. For more information, refer to +# the documentation. (integer value) +# Minimum value: 0 +#http_retries = 3 + +# +# Allow attach between instance and volume in different availability zones. For +# more information, refer to the documentation. (boolean value) +#cross_az_attach = true + +# PEM encoded Certificate Authority to use when verifying HTTPs connections +# (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# PEM encoded client certificate key file (string value) +#keyfile = + +# Verify HTTPS connections (boolean value) +#insecure = false + +# Timeout value for http requests (integer value) +#timeout = + +# Collect per-API call timing information (boolean value) +#collect_timing = false + +# Log requests to multiple loggers (boolean value) +#split_loggers = false + +# Authentication type to load (string value) +# Deprecated group/name - [cinder]/auth_plugin +#auth_type = + +# Config Section from which to load plugin specific options (string value) +#auth_section = + +# Authentication URL (string value) +#auth_url = + +# Scope for system operations (string value) +#system_scope = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + +# Project ID to scope to (string value) +#project_id = + +# Project name to scope to (string value) +#project_name = + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Trust ID (string value) +#trust_id = + +# Optional domain ID to use with v3 and v2 parameters. 
It will be used for both +# the user and project domain in v3 and ignored in v2 authentication (string +# value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. It will be used for +# both the user and project domain in v3 and ignored in v2 authentication +# (string value) +#default_domain_name = + +# User ID (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [cinder]/user_name +#username = + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User's password (string value) +#password = + +# Tenant ID (string value) +#tenant_id = + +# Tenant Name (string value) +#tenant_name = + + +[compute] + +# +# From nova.conf +# + +# +# Enables reporting of build failures to the scheduler. For more information, +# refer to the documentation. (integer value) +#consecutive_build_service_disable_threshold = 10 + +# +# Time to wait in seconds before resending an ACPI shutdown signal to +# instances. For more information, refer to the documentation. (integer value) +# Minimum value: 1 +#shutdown_retry_interval = 10 + +# +# Interval for updating nova-compute-side cache of the compute node resource +# provider's inventories, aggregates, and traits. For more information, refer to +# the documentation. (integer value) +# Minimum value: 0 +# Note: This option can be changed without restarting. +#resource_provider_association_refresh = 300 + +# +# Mask of host CPUs that can be used for ``VCPU`` resources and offloaded +# emulator threads. For more information, refer to the documentation. (string +# value) +#cpu_shared_set = + +# +# Mask of host CPUs that can be used for ``PCPU`` resources. For more +# information, refer to the documentation. (string value) +#cpu_dedicated_set = + +# +# Determine if the source compute host should wait for a ``network-vif-plugged`` +# event from the (neutron) networking service before starting the actual +# transfer +# of the guest to the destination compute host. For more information, refer to +# the documentation. (boolean value) +#live_migration_wait_for_vif_plug = true + +# +# Number of concurrent disk-IO-intensive operations (glance image downloads, +# image format conversions, etc.) that we will do in parallel. If this is set +# too high then response time suffers. +# The default value of 0 means no limit. +# (integer value) +# Minimum value: 0 +#max_concurrent_disk_ops = 0 + +# +# Maximum number of disk devices allowed to attach to a single server. Note +# that the number of disks supported by an server depends on the bus used. For +# example, the ``ide`` disk bus is limited to 4 attached devices. The configured +# maximum is enforced during server create, rebuild, evacuate, unshelve, live +# migrate, and attach volume. For more information, refer to the documentation. +# (integer value) +# Minimum value: -1 +#max_disk_devices_to_attach = -1 + +# +# Location of YAML files containing resource provider configuration data. For +# more information, refer to the documentation. (string value) +#provider_config_location = /etc/nova/provider_config/ + +# +# A list of image formats that should not be advertised as supported by this +# compute node. For more information, refer to the documentation. 
(list value) +#image_type_exclude_list = + + +[conductor] +# +# Options under this group are used to define Conductor's communication, +# which manager should be act as a proxy between computes and database, +# and finally, how many worker processes will be used. + +# +# From nova.conf +# + +# +# Number of workers for OpenStack Conductor service. The default will be the +# number of CPUs available. +# (integer value) +#workers = + + +[console] +# +# Options under this group allow to tune the configuration of the console proxy +# service. +# +# Note: in configuration of every compute is a ``console_host`` option, +# which allows to select the console proxy service to connect to. + +# +# From nova.conf +# + +# +# Adds list of allowed origins to the console websocket proxy to allow +# connections from other origin hostnames. +# Websocket proxy matches the host header with the origin header to +# prevent cross-site requests. This list specifies if any there are +# values other than host are allowed in the origin header. For more information, +# refer to the documentation. (list value) +# Deprecated group/name - [DEFAULT]/console_allowed_origins +#allowed_origins = + +# +# OpenSSL cipher preference string that specifies what ciphers to allow for TLS +# connections from clients. For example. For more information, refer to the +# documentation. (string value) +#ssl_ciphers = + +# +# Minimum allowed SSL/TLS protocol version. For more information, refer to the +# documentation. (string value) +# Possible values: +# default - Use the underlying system OpenSSL defaults +# tlsv1_1 - Require TLS v1.1 or greater for TLS connections +# tlsv1_2 - Require TLS v1.2 or greater for TLS connections +# tlsv1_3 - Require TLS v1.3 or greater for TLS connections +#ssl_minimum_version = default + + +[consoleauth] + +# +# From nova.conf +# + +# +# The lifetime of a console auth token (in seconds). For more information, refer +# to the documentation. (integer value) +# Minimum value: 0 +# Deprecated group/name - [DEFAULT]/console_token_ttl +#token_ttl = 600 + + +[cors] + +# +# From oslo.middleware +# + +# Indicate whether this resource may be shared with the domain received in the +# requests "origin" header. Format: "://[:]", no trailing +# slash. Example: https://horizon.example.com (list value) +#allowed_origin = + +# Indicate that the actual request can include user credentials (boolean value) +#allow_credentials = true + +# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple +# Headers (list value) +#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Service-Token,X-OpenStack-Nova-API-Version,OpenStack-API-Version + +# Maximum cache age of CORS preflight requests (integer value) +#max_age = 3600 + +# Indicate which methods can be used during the actual request (list value) +#allow_methods = GET,PUT,POST,DELETE,PATCH + +# Indicate which header field names may be used during the actual request (list +# value) +#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Nova-API-Version,OpenStack-API-Version + + +[cyborg] +# +# Configuration options for Cyborg (accelerator as a service). 
+ +# +# From nova.conf +# + +# PEM encoded Certificate Authority to use when verifying HTTPs connections +# (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# PEM encoded client certificate key file (string value) +#keyfile = + +# Verify HTTPS connections (boolean value) +#insecure = false + +# Timeout value for http requests (integer value) +#timeout = + +# Collect per-API call timing information (boolean value) +#collect_timing = false + +# Log requests to multiple loggers (boolean value) +#split_loggers = false + +# The default service_type for endpoint URL discovery (string value) +#service_type = accelerator + +# The default service_name for endpoint URL discovery (string value) +#service_name = + +# List of interfaces, in order of preference, for endpoint URL (list value) +#valid_interfaces = internal,public + +# The default region_name for endpoint URL discovery (string value) +#region_name = + +# Always use this endpoint URL for requests for this client. NOTE: The +# unversioned endpoint should be specified here; to request a particular API +# version, use the `version`, `min-version`, and/or `max-version` options +# (string value) +#endpoint_override = + +# The maximum number of retries that should be attempted for connection errors +# (integer value) +#connect_retries = + +# Delay (in seconds) between two retries for connection errors. If not set, +# exponential retry starting with 0.5 seconds up to a maximum of 60 seconds is +# used (floating point value) +#connect_retry_delay = + +# The maximum number of retries that should be attempted for retriable HTTP +# status codes (integer value) +#status_code_retries = + +# Delay (in seconds) between two retries for retriable status codes. If not set, +# exponential retry starting with 0.5 seconds up to a maximum of 60 seconds is +# used (floating point value) +#status_code_retry_delay = + + +[database] +{% if shared_db.nova -%} +connection = {{ shared_db.nova.connection }} +{% else -%} +connection = sqlite:////var/lib/nova/nova.sqlite +{% endif -%} + +# +# From oslo.db +# + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous = true + +# The back end to use for the database (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string to use to connect to the database (string +# value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = + +# The SQLAlchemy connection string to use to connect to the slave database +# (string value) +#slave_connection = + +# The SQL mode to be used for MySQL sessions. This option, including the +# default, overrides any server-set SQL mode. To use whatever SQL mode is set by +# the server configuration, set this to no value. 
Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# If True, transparently enables support for handling MySQL Cluster (NDB)
+# (boolean value)
+#mysql_enable_ndb = false
+
+# Connections which have been present in the connection pool longer than this
+# number of seconds will be replaced with a new one the next time they are
+# checked out from the pool (integer value)
+# Deprecated group/name - [DATABASE]/idle_timeout
+# Deprecated group/name - [database]/idle_timeout
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#connection_recycle_time = 3600
+
+# Maximum number of SQL connections to keep open in a pool. Setting a value of 0
+# indicates no limit (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = 5
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything (integer value)
+# Minimum value: 0
+# Maximum value: 100
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout =
+
+# Enable the experimental use of database reconnect on connection lost (boolean
+# value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count (integer value)
+#db_max_retries = 20
+
+# Optional URL parameters to append onto the connection URL at connect time;
+# specify as param1=value1&param2=value2& (string value)
+#connection_parameters =
+
+#
+# From oslo.db.concurrency
+#
+
+# Enable the experimental use of thread pooling for all DB API calls (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/dbapi_use_tpool
+#use_tpool = false
+
+
+[devices]
+
+#
+# From nova.conf
+#
+
+#
+# The vGPU types enabled in the compute node. For more information, refer to the
+# documentation. (list value)
+#enabled_vgpu_types =
+
+
+[ephemeral_storage_encryption]
+
+#
+# From nova.conf
+#
+
+#
+# Enables/disables LVM ephemeral storage encryption. 
+# (boolean value) +#enabled = false + +# +# Cipher-mode string to be used. For more information, refer to the +# documentation. (string value) +#cipher = aes-xts-plain64 + +# +# Encryption key length in bits. For more information, refer to the +# documentation. (integer value) +# Minimum value: 1 +#key_size = 512 + + +[filter_scheduler] + +# +# From nova.conf +# + +# +# Size of subset of best hosts selected by scheduler. For more information, +# refer to the documentation. (integer value) +# Minimum value: 1 +# Deprecated group/name - [DEFAULT]/scheduler_host_subset_size +#host_subset_size = 1 + +# +# The number of instances that can be actively performing IO on a host. For more +# information, refer to the documentation. (integer value) +# Minimum value: 0 +#max_io_ops_per_host = 8 + +# +# Maximum number of instances that can exist on a host. For more information, +# refer to the documentation. (integer value) +# Minimum value: 1 +#max_instances_per_host = 50 + +# +# Enable querying of individual hosts for instance information. For more +# information, refer to the documentation. (boolean value) +# Deprecated group/name - [DEFAULT]/scheduler_tracks_instance_changes +#track_instance_changes = true + +# +# Filters that the scheduler can use. For more information, refer to the +# documentation. (multi valued) +# Deprecated group/name - [DEFAULT]/scheduler_available_filters +#available_filters = nova.scheduler.filters.all_filters + +# +# Filters that the scheduler will use. For more information, refer to the +# documentation. (list value) +# Deprecated group/name - [DEFAULT]/scheduler_default_filters +#enabled_filters = AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter + +# +# Weighers that the scheduler will use. For more information, refer to the +# documentation. (list value) +# Deprecated group/name - [DEFAULT]/scheduler_weight_classes +#weight_classes = nova.scheduler.weights.all_weighers + +# +# RAM weight multipler ratio. For more information, refer to the documentation. +# (floating point value) +#ram_weight_multiplier = 1.0 + +# +# CPU weight multiplier ratio. For more information, refer to the documentation. +# (floating point value) +#cpu_weight_multiplier = 1.0 + +# +# Disk weight multipler ratio. For more information, refer to the documentation. +# (floating point value) +#disk_weight_multiplier = 1.0 + +# +# IO operations weight multipler ratio. For more information, refer to the +# documentation. (floating point value) +#io_ops_weight_multiplier = -1.0 + +# +# PCI device affinity weight multiplier. For more information, refer to the +# documentation. (floating point value) +# Minimum value: 0.0 +#pci_weight_multiplier = 1.0 + +# +# Multiplier used for weighing hosts for group soft-affinity. For more +# information, refer to the documentation. (floating point value) +# Minimum value: 0.0 +#soft_affinity_weight_multiplier = 1.0 + +# +# Multiplier used for weighing hosts for group soft-anti-affinity. For more +# information, refer to the documentation. (floating point value) +# Minimum value: 0.0 +#soft_anti_affinity_weight_multiplier = 1.0 + +# +# Multiplier used for weighing hosts that have had recent build failures. For +# more information, refer to the documentation. (floating point value) +#build_failure_weight_multiplier = 1000000.0 + +# +# Multiplier used for weighing hosts during a cross-cell move. For more +# information, refer to the documentation. 
(floating point value) +#cross_cell_move_weight_multiplier = 1000000.0 + +# +# Enable spreading the instances between hosts with the same best weight. For +# more information, refer to the documentation. (boolean value) +#shuffle_best_same_weighed_hosts = false + +# +# The default architecture to be used when using the image properties filter. +# For more information, refer to the documentation. (string value) +# Possible values: +# alpha - +# armv6 - +# armv7l - +# armv7b - +# aarch64 - +# cris - +# i686 - +# ia64 - +# lm32 - +# m68k - +# microblaze - +# microblazeel - +# mips - +# mipsel - +# mips64 - +# mips64el - +# openrisc - +# parisc - +# parisc64 - +# ppc - +# ppcle - +# ppc64 - +# ppc64le - +# ppcemb - +# s390 - +# s390x - +# sh4 - +# sh4eb - +# sparc - +# sparc64 - +# unicore32 - +# x86_64 - +# xtensa - +# xtensaeb - +#image_properties_default_architecture = + +# +# List of UUIDs for images that can only be run on certain hosts. For more +# information, refer to the documentation. (list value) +#isolated_images = + +# +# List of hosts that can only run certain images. For more information, refer to +# the documentation. (list value) +#isolated_hosts = + +# +# Prevent non-isolated images from being built on isolated hosts. For more +# information, refer to the documentation. (boolean value) +#restrict_isolated_hosts_to_isolated_images = true + +# +# Image property namespace for use in the host aggregate. For more information, +# refer to the documentation. (string value) +#aggregate_image_properties_isolation_namespace = + +# +# Separator character(s) for image property namespace and name. For more +# information, refer to the documentation. (string value) +#aggregate_image_properties_isolation_separator = . + + +[glance] +service_type = image +service_name = glance +valid_interfaces = internal +region_name = {{ options.region }} + +# Configuration options for the Image service + +# +# From nova.conf +# + +# DEPRECATED: +# List of glance api servers endpoints available to nova. For more information, +# refer to the documentation. (list value) +# This option is deprecated for removal since 21.0.0. +# Its value may be silently ignored in the future. +# Reason: +# Support for image service configuration via standard keystoneauth1 Adapter +# options was added in the 17.0.0 Queens release. The api_servers option was +# retained temporarily to allow consumers time to cut over to a real load +# balancing solution. +#api_servers = + +# +# Enable glance operation retries. For more information, refer to the +# documentation. (integer value) +# Minimum value: 0 +#num_retries = 3 + +# +# Enable image signature verification. For more information, refer to the +# documentation. (boolean value) +#verify_glance_signatures = false + +# DEPRECATED: +# Enable certificate validation for image signature verification. For more +# information, refer to the documentation. (boolean value) +# This option is deprecated for removal since 16.0.0. +# Its value may be silently ignored in the future. +# Reason: +# This option is intended to ease the transition for deployments leveraging +# image signature verification. The intended state long-term is for signature +# verification and certificate validation to always happen together. +#enable_certificate_validation = false + +# +# List of certificate IDs for certificates that should be trusted. For more +# information, refer to the documentation. (list value) +#default_trusted_certificate_ids = + +# +# Enable download of Glance images directly via RBD. 
For more information, refer +# to the documentation. (boolean value) +#enable_rbd_download = false + +# +# The RADOS client name for accessing Glance images stored as rbd volumes. For +# more information, refer to the documentation. (string value) +#rbd_user = + +# +# The RADOS client timeout in seconds when initially connecting to the cluster. +# For more information, refer to the documentation. (integer value) +#rbd_connect_timeout = 5 + +# +# The RADOS pool in which the Glance images are stored as rbd volumes. For more +# information, refer to the documentation. (string value) +#rbd_pool = + +# +# Path to the ceph configuration file to use. For more information, refer to the +# documentation. (string value) +#rbd_ceph_conf = + +# Enable or disable debug logging with glanceclient (boolean value) +#debug = false + +# PEM encoded Certificate Authority to use when verifying HTTPs connections +# (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# PEM encoded client certificate key file (string value) +#keyfile = + +# Verify HTTPS connections (boolean value) +#insecure = false + +# Timeout value for http requests (integer value) +#timeout = + +# Collect per-API call timing information (boolean value) +#collect_timing = false + +# Log requests to multiple loggers (boolean value) +#split_loggers = false + +# The default service_type for endpoint URL discovery (string value) +#service_type = image + +# The default service_name for endpoint URL discovery (string value) +#service_name = + +# List of interfaces, in order of preference, for endpoint URL (list value) +#valid_interfaces = internal,public + +# The default region_name for endpoint URL discovery (string value) +#region_name = + +# Always use this endpoint URL for requests for this client. NOTE: The +# unversioned endpoint should be specified here; to request a particular API +# version, use the `version`, `min-version`, and/or `max-version` options +# (string value) +#endpoint_override = + +# The maximum number of retries that should be attempted for connection errors +# (integer value) +#connect_retries = + +# Delay (in seconds) between two retries for connection errors. If not set, +# exponential retry starting with 0.5 seconds up to a maximum of 60 seconds is +# used (floating point value) +#connect_retry_delay = + +# The maximum number of retries that should be attempted for retriable HTTP +# status codes (integer value) +#status_code_retries = + +# Delay (in seconds) between two retries for retriable status codes. If not set, +# exponential retry starting with 0.5 seconds up to a maximum of 60 seconds is +# used (floating point value) +#status_code_retry_delay = + + +[guestfs] +# +# libguestfs is a set of tools for accessing and modifying virtual +# machine (VM) disk images. You can use this for viewing and editing +# files inside guests, scripting changes to VMs, monitoring disk +# used/free statistics, creating guests, P2V, V2V, performing backups, +# cloning VMs, building VMs, formatting disks and resizing disks. + +# +# From nova.conf +# + +# +# Enable/disables guestfs logging. For more information, refer to the +# documentation. (boolean value) +#debug = false + + +[healthcheck] + +# +# From oslo.middleware +# + +# DEPRECATED: The path to respond to healtcheck requests on (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#path = /healthcheck + +# Show more detailed information as part of the response. 
Security note: +# Enabling this option may expose sensitive details about the service being +# monitored. Be sure to verify that it will not violate your security policies +# (boolean value) +#detailed = false + +# Additional backends that can perform health checks and report that information +# back as part of a request (list value) +#backends = + +# Check the presence of a file to determine if an application is running on a +# port. Used by DisableByFileHealthcheck plugin (string value) +#disable_by_file_path = + +# Check the presence of a file based on a port to determine if an application is +# running on a port. Expects a "port:path" list of strings. Used by +# DisableByFilesPortsHealthcheck plugin (list value) +#disable_by_file_paths = + + +[hyperv] +# +# The hyperv feature allows you to configure the Hyper-V hypervisor +# driver to be used within an OpenStack deployment. + +# +# From nova.conf +# + +# +# Dynamic memory ratio. For more information, refer to the documentation. +# (floating point value) +#dynamic_memory_ratio = 1.0 + +# +# Enable instance metrics collection. For more information, refer to the +# documentation. (boolean value) +#enable_instance_metrics_collection = false + +# +# Instances path share. For more information, refer to the documentation. +# (string value) +#instances_path_share = + +# +# Limit CPU features. For more information, refer to the documentation. (boolean +# value) +#limit_cpu_features = false + +# +# Mounted disk query retry count. For more information, refer to the +# documentation. (integer value) +# Minimum value: 0 +#mounted_disk_query_retry_count = 10 + +# +# Mounted disk query retry interval. For more information, refer to the +# documentation. (integer value) +# Minimum value: 0 +#mounted_disk_query_retry_interval = 5 + +# +# Power state check timeframe. For more information, refer to the documentation. +# (integer value) +# Minimum value: 0 +#power_state_check_timeframe = 60 + +# +# Power state event polling interval. For more information, refer to the +# documentation. (integer value) +# Minimum value: 0 +#power_state_event_polling_interval = 2 + +# +# qemu-img command. For more information, refer to the documentation. (string +# value) +#qemu_img_cmd = qemu-img.exe + +# +# External virtual switch name. For more information, refer to the +# documentation. (string value) +#vswitch_name = + +# +# Wait soft reboot seconds. For more information, refer to the documentation. +# (integer value) +# Minimum value: 0 +#wait_soft_reboot_seconds = 60 + +# +# Mount config drive as a CD drive. For more information, refer to the +# documentation. (boolean value) +#config_drive_cdrom = false + +# +# Inject password to config drive. For more information, refer to the +# documentation. (boolean value) +#config_drive_inject_password = false + +# +# Volume attach retry count. For more information, refer to the documentation. +# (integer value) +# Minimum value: 0 +#volume_attach_retry_count = 10 + +# +# Volume attach retry interval. For more information, refer to the +# documentation. (integer value) +# Minimum value: 0 +#volume_attach_retry_interval = 5 + +# +# Enable RemoteFX feature. For more information, refer to the documentation. +# (boolean value) +#enable_remotefx = false + +# +# Use multipath connections when attaching iSCSI or FC disks. For more +# information, refer to the documentation. (boolean value) +#use_multipath_io = false + +# +# List of iSCSI initiators that will be used for estabilishing iSCSI sessions. 
+# For more information, refer to the documentation. (list value) +#iscsi_initiator_list = + + +[image_cache] +# +# A collection of options specific to image caching. + +# +# From nova.conf +# + +# +# Number of seconds to wait between runs of the image cache manager. For more +# information, refer to the documentation. (integer value) +# Minimum value: -1 +# Deprecated group/name - [DEFAULT]/image_cache_manager_interval +#manager_interval = 2400 + +# +# Location of cached images. For more information, refer to the documentation. +# (string value) +# Deprecated group/name - [DEFAULT]/image_cache_subdirectory_name +#subdirectory_name = _base + +# Should unused base images be removed? (boolean value) +#remove_unused_base_images = true + +# +# Unused unresized base images younger than this will not be removed. +# (integer value) +#remove_unused_original_minimum_age_seconds = 86400 + +# +# Unused resized base images younger than this will not be removed. +# (integer value) +#remove_unused_resized_minimum_age_seconds = 3600 + +# +# Maximum number of compute hosts to trigger image precaching in parallel. For +# more information, refer to the documentation. (integer value) +# Minimum value: 1 +#precache_concurrency = 1 + + +[ironic] +# +# Configuration options for Ironic driver (Bare Metal). +# If using the Ironic driver following options must be set: +# * auth_type +# * auth_url +# * project_name +# * username +# * password +# * project_domain_id or project_domain_name +# * user_domain_id or user_domain_name + +# +# From nova.conf +# + +# +# The number of times to retry when a request conflicts. +# If set to 0, only try once, no retries. For more information, refer to the +# documentation. (integer value) +# Minimum value: 0 +#api_max_retries = 60 + +# +# The number of seconds to wait before retrying the request. For more +# information, refer to the documentation. (integer value) +# Minimum value: 0 +#api_retry_interval = 2 + +# Timeout (seconds) to wait for node serial console state changed. Set to 0 to +# disable timeout (integer value) +# Minimum value: 0 +#serial_console_state_timeout = 10 + +# Case-insensitive key to limit the set of nodes that may be managed by this +# service to the set of nodes in Ironic which have a matching conductor_group +# property. If unset, all available nodes will be eligible to be managed by this +# service. Note that setting this to the empty string (``""``) will match the +# default conductor group, and is different than leaving the option unset +# (string value) +# Note: This option can be changed without restarting. +#partition_key = + +# List of hostnames for all nova-compute services (including this host) with +# this partition_key config value. Nodes matching the partition_key value will +# be distributed between all services specified here. If partition_key is unset, +# this option is ignored (list value) +# Note: This option can be changed without restarting. 
+#peer_list = + +# PEM encoded Certificate Authority to use when verifying HTTPs connections +# (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# PEM encoded client certificate key file (string value) +#keyfile = + +# Verify HTTPS connections (boolean value) +#insecure = false + +# Timeout value for http requests (integer value) +#timeout = + +# Collect per-API call timing information (boolean value) +#collect_timing = false + +# Log requests to multiple loggers (boolean value) +#split_loggers = false + +# Authentication type to load (string value) +# Deprecated group/name - [ironic]/auth_plugin +#auth_type = + +# Config Section from which to load plugin specific options (string value) +#auth_section = + +# Authentication URL (string value) +#auth_url = + +# Scope for system operations (string value) +#system_scope = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + +# Project ID to scope to (string value) +#project_id = + +# Project name to scope to (string value) +#project_name = + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Trust ID (string value) +#trust_id = + +# User ID (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [ironic]/user_name +#username = + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User's password (string value) +#password = + +# The default service_type for endpoint URL discovery (string value) +#service_type = baremetal + +# The default service_name for endpoint URL discovery (string value) +#service_name = + +# List of interfaces, in order of preference, for endpoint URL (list value) +#valid_interfaces = internal,public + +# The default region_name for endpoint URL discovery (string value) +#region_name = + +# Always use this endpoint URL for requests for this client. NOTE: The +# unversioned endpoint should be specified here; to request a particular API +# version, use the `version`, `min-version`, and/or `max-version` options +# (string value) +#endpoint_override = + +# The maximum number of retries that should be attempted for connection errors +# (integer value) +#connect_retries = + +# Delay (in seconds) between two retries for connection errors. If not set, +# exponential retry starting with 0.5 seconds up to a maximum of 60 seconds is +# used (floating point value) +#connect_retry_delay = + +# The maximum number of retries that should be attempted for retriable HTTP +# status codes (integer value) +#status_code_retries = + +# Delay (in seconds) between two retries for retriable status codes. If not set, +# exponential retry starting with 0.5 seconds up to a maximum of 60 seconds is +# used (floating point value) +#status_code_retry_delay = + + +[key_manager] + +# +# From nova.conf +# + +# +# Fixed key returned by key manager, specified in hex. For more information, +# refer to the documentation. (string value) +#fixed_key = + +# Specify the key manager implementation. Options are "barbican" and "vault". +# Default is "barbican". Will support the values earlier set using +# [key_manager]/api_class for some time (string value) +# Deprecated group/name - [key_manager]/api_class +#backend = barbican + +# The type of authentication credential to create. 
Possible values are 'token', +# 'password', 'keystone_token', and 'keystone_password'. Required if no context +# is passed to the credential factory (string value) +#auth_type = + +# Token for authentication. Required for 'token' and 'keystone_token' auth_type +# if no context is passed to the credential factory (string value) +#token = + +# Username for authentication. Required for 'password' auth_type. Optional for +# the 'keystone_password' auth_type (string value) +#username = + +# Password for authentication. Required for 'password' and 'keystone_password' +# auth_type (string value) +#password = + +# Use this endpoint to connect to Keystone (string value) +#auth_url = + +# User ID for authentication. Optional for 'keystone_token' and +# 'keystone_password' auth_type (string value) +#user_id = + +# User's domain ID for authentication. Optional for 'keystone_token' and +# 'keystone_password' auth_type (string value) +#user_domain_id = + +# User's domain name for authentication. Optional for 'keystone_token' and +# 'keystone_password' auth_type (string value) +#user_domain_name = + +# Trust ID for trust scoping. Optional for 'keystone_token' and +# 'keystone_password' auth_type (string value) +#trust_id = + +# Domain ID for domain scoping. Optional for 'keystone_token' and +# 'keystone_password' auth_type (string value) +#domain_id = + +# Domain name for domain scoping. Optional for 'keystone_token' and +# 'keystone_password' auth_type (string value) +#domain_name = + +# Project ID for project scoping. Optional for 'keystone_token' and +# 'keystone_password' auth_type (string value) +#project_id = + +# Project name for project scoping. Optional for 'keystone_token' and +# 'keystone_password' auth_type (string value) +#project_name = + +# Project's domain ID for project. Optional for 'keystone_token' and +# 'keystone_password' auth_type (string value) +#project_domain_id = + +# Project's domain name for project. Optional for 'keystone_token' and +# 'keystone_password' auth_type (string value) +#project_domain_name = + +# Allow fetching a new token if the current one is going to expire. Optional for +# 'keystone_token' and 'keystone_password' auth_type (boolean value) +#reauthenticate = true + + +[keystone] +# Configuration options for the identity service + +# +# From nova.conf +# + +# PEM encoded Certificate Authority to use when verifying HTTPs connections +# (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# PEM encoded client certificate key file (string value) +#keyfile = + +# Verify HTTPS connections (boolean value) +#insecure = false + +# Timeout value for http requests (integer value) +#timeout = + +# Collect per-API call timing information (boolean value) +#collect_timing = false + +# Log requests to multiple loggers (boolean value) +#split_loggers = false + +# The default service_type for endpoint URL discovery (string value) +#service_type = identity + +# The default service_name for endpoint URL discovery (string value) +#service_name = + +# List of interfaces, in order of preference, for endpoint URL (list value) +#valid_interfaces = internal,public + +# The default region_name for endpoint URL discovery (string value) +#region_name = + +# Always use this endpoint URL for requests for this client. 
NOTE: The +# unversioned endpoint should be specified here; to request a particular API +# version, use the `version`, `min-version`, and/or `max-version` options +# (string value) +#endpoint_override = + +# The maximum number of retries that should be attempted for connection errors +# (integer value) +#connect_retries = + +# Delay (in seconds) between two retries for connection errors. If not set, +# exponential retry starting with 0.5 seconds up to a maximum of 60 seconds is +# used (floating point value) +#connect_retry_delay = + +# The maximum number of retries that should be attempted for retriable HTTP +# status codes (integer value) +#status_code_retries = + +# Delay (in seconds) between two retries for retriable status codes. If not set, +# exponential retry starting with 0.5 seconds up to a maximum of 60 seconds is +# used (floating point value) +#status_code_retry_delay = + + +{% include "parts/section-identity" %} + + +[libvirt] +# +# Libvirt options allows cloud administrator to configure related +# libvirt hypervisor driver to be used within an OpenStack deployment. +# +# Almost all of the libvirt config options are influence by ``virt_type`` config +# which describes the virtualization type (or so called domain type) libvirt +# should use for specific features such as live migration, snapshot. + +# +# From nova.conf +# + +# +# The ID of the image to boot from to rescue data from a corrupted instance. For +# more information, refer to the documentation. (string value) +#rescue_image_id = + +# +# The ID of the kernel (AKI) image to use with the rescue image. For more +# information, refer to the documentation. (string value) +#rescue_kernel_id = + +# +# The ID of the RAM disk (ARI) image to use with the rescue image. For more +# information, refer to the documentation. (string value) +#rescue_ramdisk_id = + +# +# Describes the virtualization type (or so called domain type) libvirt should +# use. For more information, refer to the documentation. (string value) +# Possible values: +# kvm - +# lxc - +# qemu - +# parallels - +#virt_type = kvm + +# +# Overrides the default libvirt URI of the chosen virtualization type. For more +# information, refer to the documentation. (string value) +#connection_uri = + +# +# Allow the injection of an admin password for instance only at ``create`` and +# ``rebuild`` process. For more information, refer to the documentation. +# (boolean value) +#inject_password = false + +# +# Allow the injection of an SSH key at boot time. For more information, refer to +# the documentation. (boolean value) +#inject_key = false + +# +# Determines how the file system is chosen to inject data into it. For more +# information, refer to the documentation. (integer value) +# Minimum value: -2 +#inject_partition = -2 + +# +# URI scheme for live migration used by the source of live migration traffic. +# For more information, refer to the documentation. (string value) +#live_migration_scheme = + +# +# IP address used as the live migration address for this host. For more +# information, refer to the documentation. (host address value) +#live_migration_inbound_addr = + +# DEPRECATED: +# Live migration target URI used by the source of live migration traffic. For +# more information, refer to the documentation. (string value) +# This option is deprecated for removal since 15.0.0. +# Its value may be silently ignored in the future. 
+# Reason: +# live_migration_uri is deprecated for removal in favor of two other options +# that +# allow to change live migration scheme and target URI: +# ``live_migration_scheme`` +# and ``live_migration_inbound_addr`` respectively. +#live_migration_uri = + +# DEPRECATED: +# Enable tunnelled migration. For more information, refer to the documentation. +# (boolean value) +# This option is deprecated for removal since 23.0.0. +# Its value may be silently ignored in the future. +# Reason: +# The "tunnelled live migration" has two inherent limitations: it cannot +# handle live migration of disks in a non-shared storage setup; and it has +# a huge performance cost. Both these problems are solved by +# ``live_migration_with_native_tls`` (requires a pre-configured TLS +# environment), which is the recommended approach for securing all live +# migration streams. +#live_migration_tunnelled = false + +# +# Maximum bandwidth(in MiB/s) to be used during migration. For more information, +# refer to the documentation. (integer value) +#live_migration_bandwidth = 0 + +# +# Maximum permitted downtime, in milliseconds, for live migration +# switchover. For more information, refer to the documentation. (integer value) +# Minimum value: 100 +#live_migration_downtime = 500 + +# +# Number of incremental steps to reach max downtime value. For more information, +# refer to the documentation. (integer value) +# Minimum value: 3 +#live_migration_downtime_steps = 10 + +# +# Time to wait, in seconds, between each step increase of the migration +# downtime. For more information, refer to the documentation. (integer value) +# Minimum value: 3 +#live_migration_downtime_delay = 75 + +# +# Time to wait, in seconds, for migration to successfully complete transferring +# data before aborting the operation. For more information, refer to the +# documentation. (integer value) +# Minimum value: 0 +# Note: This option can be changed without restarting. +#live_migration_completion_timeout = 800 + +# +# This option will be used to determine what action will be taken against a +# VM after ``live_migration_completion_timeout`` expires. By default, the live +# migrate operation will be aborted after completion timeout. If it is set to +# ``force_complete``, the compute service will either pause the VM or trigger +# post-copy depending on if post copy is enabled and available +# (``live_migration_permit_post_copy`` is set to True). For more information, +# refer to the documentation. (string value) +# Possible values: +# abort - +# force_complete - +# Note: This option can be changed without restarting. +#live_migration_timeout_action = abort + +# +# This option allows nova to switch an on-going live migration to post-copy +# mode, i.e., switch the active VM to the one on the destination node before the +# migration is complete, therefore ensuring an upper bound on the memory that +# needs to be transferred. Post-copy requires libvirt>=1.3.3 and QEMU>=2.5.0. +# For more information, refer to the documentation. (boolean value) +#live_migration_permit_post_copy = false + +# +# This option allows nova to start live migration with auto converge on. For +# more information, refer to the documentation. (boolean value) +#live_migration_permit_auto_converge = false + +# +# Determine the snapshot image format when sending to the image service. For +# more information, refer to the documentation. 
(string value) +# Possible values: +# raw - RAW disk format +# qcow2 - KVM default disk format +# vmdk - VMWare default disk format +# vdi - VirtualBox default disk format +#snapshot_image_format = + +# +# Use QEMU-native TLS encryption when live migrating. For more information, +# refer to the documentation. (boolean value) +#live_migration_with_native_tls = false + +# +# Override the default disk prefix for the devices attached to an instance. For +# more information, refer to the documentation. (string value) +#disk_prefix = + +# Number of seconds to wait for instance to shut down after soft reboot request +# is made. We fall back to hard reboot if instance does not shutdown within this +# window (integer value) +#wait_soft_reboot_seconds = 120 + +# +# Is used to set the CPU mode an instance should have. For more information, +# refer to the documentation. (string value) +# Possible values: +# host-model - Clone the host CPU feature flags +# host-passthrough - Use the host CPU model exactly +# custom - Use the CPU model in ``[libvirt]cpu_models`` +# none - Don't set a specific CPU model. For instances with ``[libvirt] +# virt_type`` as KVM/QEMU, the default CPU model from QEMU will be used, which +# provides a basic set of CPU features that are compatible with most hosts +#cpu_mode = + +# +# An ordered list of CPU models the host supports. For more information, refer +# to the documentation. (list value) +# Deprecated group/name - [libvirt]/cpu_model +#cpu_models = + +# +# Enable or disable guest CPU flags. For more information, refer to the +# documentation. (list value) +#cpu_model_extra_flags = + +# Location where libvirt driver will store snapshots before uploading them to +# image service (string value) +#snapshots_directory = $instances_path/snapshots + +# +# Specific cache modes to use for different disk types. For more information, +# refer to the documentation. (list value) +#disk_cachemodes = + +# +# The path to an RNG (Random Number Generator) device that will be used as +# the source of entropy on the host. Since libvirt 1.3.4, any path (that +# returns random numbers when read) is accepted. The recommended source +# of entropy is ``/dev/urandom`` -- it is non-blocking, therefore +# relatively fast; and avoids the limitations of ``/dev/random``, which is +# a legacy interface. For more details (and comparision between different +# RNG sources), refer to the "Usage" section in the Linux kernel API +# documentation for ``[u]random``: +# http://man7.org/linux/man-pages/man4/urandom.4.html and +# http://man7.org/linux/man-pages/man7/random.7.html. +# (string value) +#rng_dev_path = /dev/urandom + +# For qemu or KVM guests, set this option to specify a default machine type per +# host architecture. You can find a list of supported machine types in your +# environment by checking the output of the :command:`virsh capabilities` +# command. The format of the value for this config option is ``host- +# arch=machine-type``. For example: ``x86_64=machinetype1,armv7l=machinetype2`` +# (list value) +#hw_machine_type = + +# +# The data source used to the populate the host "serial" UUID exposed to guest +# in the virtual BIOS. All choices except ``unique`` will change the serial when +# migrating the instance to another host. Changing the choice of this option +# will +# also affect existing instances on this host once they are stopped and started +# again. It is recommended to use the default choice (``unique``) since that +# will +# not change when an instance is migrated. 
However, if you have a need for +# per-host serials in addition to per-instance serial numbers, then consider +# restricting flavors via host aggregates. +# (string value) +# Possible values: +# none - A serial number entry is not added to the guest domain xml. +# os - A UUID serial number is generated from the host ``/etc/machine-id`` file. +# hardware - A UUID for the host hardware as reported by libvirt. This is +# typically from the host SMBIOS data, unless it has been overridden in +# ``libvirtd.conf``. +# auto - Uses the "os" source if possible, else "hardware". +# unique - Uses instance UUID as the serial number. +#sysinfo_serial = unique + +# A number of seconds to memory usage statistics period. Zero or negative value +# mean to disable memory usage statistics (integer value) +#mem_stats_period_seconds = 10 + +# List of uid targets and ranges.Syntax is guest-uid:host-uid:count. Maximum of +# 5 allowed (list value) +#uid_maps = + +# List of guid targets and ranges.Syntax is guest-gid:host-gid:count. Maximum of +# 5 allowed (list value) +#gid_maps = + +# In a realtime host context vCPUs for guest will run in that scheduling +# priority. Priority depends on the host kernel (usually 1-99) (integer value) +#realtime_scheduler_priority = 1 + +# +# Performance events to monitor and collect statistics for. For more +# information, refer to the documentation. (list value) +#enabled_perf_events = + +# +# The number of PCIe ports an instance will get. For more information, refer to +# the documentation. (integer value) +# Minimum value: 0 +# Maximum value: 28 +#num_pcie_ports = 0 + +# +# Available capacity in MiB for file-backed memory. For more information, refer +# to the documentation. (integer value) +# Minimum value: 0 +#file_backed_memory = 0 + +# +# Maximum number of guests with encrypted memory which can run +# concurrently on this compute host. For more information, refer to the +# documentation. (integer value) +# Minimum value: 0 +#num_memory_encrypted_guests = + +# +# Maximum number of attempts the driver tries to detach a device in libvirt. For +# more information, refer to the documentation. (integer value) +# Minimum value: 1 +#device_detach_attempts = 8 + +# +# Maximum number of seconds the driver waits for the success or the failure +# event from libvirt for a given device detach attempt before it re-trigger the +# detach. For more information, refer to the documentation. (integer value) +# Minimum value: 1 +#device_detach_timeout = 20 + +# +# VM Images format. For more information, refer to the documentation. (string +# value) +# Possible values: +# raw - +# flat - +# qcow2 - +# lvm - +# rbd - +# ploop - +# default - +#images_type = default + +# +# LVM Volume Group that is used for VM images, when you specify images_type=lvm. +# For more information, refer to the documentation. (string value) +#images_volume_group = + +# DEPRECATED: +# Create sparse logical volumes (with virtualsize) if this flag is set to True. +# (boolean value) +# This option is deprecated for removal since 18.0.0. +# Its value may be silently ignored in the future. +# Reason: +# Sparse logical volumes is a feature that is not tested hence not supported. +# LVM logical volumes are preallocated by default. If you want thin +# provisioning, +# use Cinder thin-provisioned volumes. 
+#sparse_logical_volumes = false + +# The RADOS pool in which rbd volumes are stored (string value) +#images_rbd_pool = rbd + +# Path to the ceph configuration file to use (string value) +#images_rbd_ceph_conf = + +# +# The name of the Glance store that represents the rbd cluster in use by +# this node. If set, this will allow Nova to request that Glance copy an +# image from an existing non-local store into the one named by this option +# before booting so that proper Copy-on-Write behavior is maintained. For more +# information, refer to the documentation. (string value) +#images_rbd_glance_store_name = + +# +# The interval in seconds with which to poll Glance after asking for it +# to copy an image to the local rbd store. This affects how often we ask +# Glance to report on copy completion, and thus should be short enough that +# we notice quickly, but not too aggressive that we generate undue load on +# the Glance server. For more information, refer to the documentation. (integer +# value) +#images_rbd_glance_copy_poll_interval = 15 + +# +# The overall maximum time we will wait for Glance to complete an image +# copy to our local rbd store. This should be long enough to allow large +# images to be copied over the network link between our local store and the +# one where images typically reside. The downside of setting this too long +# is just to catch the case where the image copy is stalled or proceeding too +# slowly to be useful. Actual errors will be reported by Glance and noticed +# according to the poll interval. For more information, refer to the +# documentation. (integer value) +#images_rbd_glance_copy_timeout = 600 + +# +# Discard option for nova managed disks. For more information, refer to the +# documentation. (string value) +# Possible values: +# ignore - +# unmap - +#hw_disk_discard = + +# +# Method used to wipe ephemeral disks when they are deleted. Only takes effect +# if LVM is set as backing storage. For more information, refer to the +# documentation. (string value) +# Possible values: +# zero - Overwrite volumes with zeroes +# shred - Overwrite volumes repeatedly +# none - Do not wipe deleted volumes +#volume_clear = zero + +# +# Size of area in MiB, counting from the beginning of the allocated volume, +# that will be cleared using method set in ``volume_clear`` option. For more +# information, refer to the documentation. (integer value) +# Minimum value: 0 +#volume_clear_size = 0 + +# +# Enable snapshot compression for ``qcow2`` images. For more information, refer +# to the documentation. (boolean value) +#snapshot_compression = false + +# Use virtio for bridge interfaces with KVM/QEMU (boolean value) +#use_virtio_for_bridges = true + +# +# Use multipath connection of the iSCSI or FC volume. For more information, +# refer to the documentation. (boolean value) +# Deprecated group/name - [libvirt]/iscsi_use_multipath +#volume_use_multipath = false + +# +# Number of times to scan given storage protocol to find volume. +# (integer value) +# Deprecated group/name - [libvirt]/num_iscsi_scan_tries +#num_volume_scan_tries = 5 + +# +# Number of times to rediscover AoE target to find volume. For more information, +# refer to the documentation. (integer value) +#num_aoe_discover_tries = 3 + +# +# The iSCSI transport iface to use to connect to target in case offload support +# is desired. For more information, refer to the documentation. 
(string value) +# Deprecated group/name - [libvirt]/iscsi_transport +#iscsi_iface = + +# +# Number of times to scan iSER target to find volume. For more information, +# refer to the documentation. (integer value) +#num_iser_scan_tries = 5 + +# +# Use multipath connection of the iSER volume. For more information, refer to +# the documentation. (boolean value) +#iser_use_multipath = false + +# +# The RADOS client name for accessing rbd(RADOS Block Devices) volumes. For more +# information, refer to the documentation. (string value) +#rbd_user = + +# +# The libvirt UUID of the secret for the rbd_user volumes. +# (string value) +#rbd_secret_uuid = + +# +# The RADOS client timeout in seconds when initially connecting to the cluster. +# (integer value) +#rbd_connect_timeout = 5 + +# +# Number of seconds to wait between each consecutive retry to destroy a +# RBD volume. For more information, refer to the documentation. (integer value) +# Minimum value: 0 +#rbd_destroy_volume_retry_interval = 5 + +# +# Number of retries to destroy a RBD volume. For more information, refer to the +# documentation. (integer value) +# Minimum value: 0 +#rbd_destroy_volume_retries = 12 + +# +# Directory where the NFS volume is mounted on the compute node. +# The default is 'mnt' directory of the location where nova's Python module +# is installed. For more information, refer to the documentation. (string value) +#nfs_mount_point_base = $state_path/mnt + +# +# Mount options passed to the NFS client. See section of the nfs man page +# for details. For more information, refer to the documentation. (string value) +#nfs_mount_options = + +# +# Directory where the Quobyte volume is mounted on the compute node. For more +# information, refer to the documentation. (string value) +#quobyte_mount_point_base = $state_path/mnt + +# Path to a Quobyte Client configuration file (string value) +#quobyte_client_cfg = + +# +# Directory where the SMBFS shares are mounted on the compute node. +# (string value) +#smbfs_mount_point_base = $state_path/mnt + +# +# Mount options passed to the SMBFS client. For more information, refer to the +# documentation. (string value) +#smbfs_mount_options = + +# +# libvirt's transport method for remote file operations. For more information, +# refer to the documentation. (string value) +# Possible values: +# ssh - +# rsync - +#remote_filesystem_transport = ssh + +# +# Directory where the Virtuozzo Storage clusters are mounted on the compute +# node. For more information, refer to the documentation. (string value) +#vzstorage_mount_point_base = $state_path/mnt + +# +# Mount owner user name. For more information, refer to the documentation. +# (string value) +#vzstorage_mount_user = stack + +# +# Mount owner group name. For more information, refer to the documentation. +# (string value) +#vzstorage_mount_group = qemu + +# +# Mount access mode. For more information, refer to the documentation. (string +# value) +#vzstorage_mount_perms = 0770 + +# +# Path to vzstorage client log. For more information, refer to the +# documentation. (string value) +#vzstorage_log_path = /var/log/vstorage/%(cluster_name)s/nova.log.gz + +# +# Path to the SSD cache file. For more information, refer to the documentation. +# (string value) +#vzstorage_cache_path = + +# +# Extra mount options for pstorage-mount. For more information, refer to the +# documentation. (list value) +#vzstorage_mount_opts = + +# +# Configure virtio rx queue size. For more information, refer to the +# documentation. 
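+# For illustration only: guests with high packet rates are often given larger
+# rings, e.g. ``rx_queue_size = 1024`` together with a matching
+# ``tx_queue_size`` (see below).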
(integer value) +# Possible values: +# 256 - +# 512 - +# 1024 - +#rx_queue_size = + +# +# Configure virtio tx queue size. For more information, refer to the +# documentation. (integer value) +# Possible values: +# 256 - +# 512 - +# 1024 - +#tx_queue_size = + +# +# The maximum number of virtio queue pairs that can be enabled +# when creating a multiqueue guest. The number of virtio queues +# allocated will be the lesser of the CPUs requested by the guest +# and the max value defined. By default, this value is set to none +# meaning the legacy limits based on the reported kernel +# major version will be used. +# (integer value) +# Minimum value: 1 +#max_queues = + +# +# Number of times to rediscover NVMe target to find volume. For more +# information, refer to the documentation. (integer value) +#num_nvme_discover_tries = 5 + +# +# Configure persistent memory(pmem) namespaces. For more information, refer to +# the documentation. (list value) +#pmem_namespaces = + +# +# Enable emulated TPM (Trusted Platform Module) in guests. +# (boolean value) +#swtpm_enabled = false + +# +# User that swtpm binary runs as. For more information, refer to the +# documentation. (string value) +#swtpm_user = tss + +# +# Group that swtpm binary runs as. For more information, refer to the +# documentation. (string value) +#swtpm_group = tss + + +[metrics] +# +# Configuration options for metrics +# +# Options under this group allow to adjust how values assigned to metrics are +# calculated. + +# +# From nova.conf +# + +# +# Multiplier used for weighing hosts based on reported metrics. For more +# information, refer to the documentation. (floating point value) +#weight_multiplier = 1.0 + +# +# Mapping of metric to weight modifier. For more information, refer to the +# documentation. (list value) +#weight_setting = + +# +# Whether metrics are required. For more information, refer to the +# documentation. (boolean value) +#required = true + +# +# Default weight for unavailable metrics. For more information, refer to the +# documentation. (floating point value) +#weight_of_unavailable = -10000.0 + + +[mks] +# +# Nova compute node uses WebMKS, a desktop sharing protocol to provide +# instance console access to VM's created by VMware hypervisors. +# +# Related options: +# Following options must be set to provide console access. +# * mksproxy_base_url +# * enabled + +# +# From nova.conf +# + +# +# Location of MKS web console proxy. For more information, refer to the +# documentation. (uri value) +#mksproxy_base_url = http://127.0.0.1:6090/ + +# +# Enables graphical console access for virtual machines. +# (boolean value) +#enabled = false + + +[neutron] +# +# Configuration options for neutron (network connectivity as a service). + +# +# From nova.conf +# + +# +# Default name for the Open vSwitch integration bridge. For more information, +# refer to the documentation. (string value) +#ovs_bridge = br-int + +# +# Default name for the floating IP pool. For more information, refer to the +# documentation. (string value) +#default_floating_pool = nova + +# +# Integer value representing the number of seconds to wait before querying +# Neutron for extensions. After this number of seconds the next time Nova +# needs to create a resource in Neutron it will requery Neutron for the +# extensions that it has loaded. Setting value to 0 will refresh the +# extensions with no wait. +# (integer value) +# Minimum value: 0 +#extension_sync_interval = 600 + +# +# List of physnets present on this host. 
For more information, refer to the +# documentation. (list value) +#physnets = + +# +# Number of times neutronclient should retry on any failed http call. For more +# information, refer to the documentation. (integer value) +# Minimum value: 0 +#http_retries = 3 + +# +# When set to True, this option indicates that Neutron will be used to proxy +# metadata requests and resolve instance ids. Otherwise, the instance ID must be +# passed to the metadata request in the 'X-Instance-ID' header. For more +# information, refer to the documentation. (boolean value) +#service_metadata_proxy = false + +# +# This option holds the shared secret string used to validate proxy requests to +# Neutron metadata requests. In order to be used, the +# 'X-Metadata-Provider-Signature' header must be supplied in the request. For +# more information, refer to the documentation. (string value) +#metadata_proxy_shared_secret = + +# PEM encoded Certificate Authority to use when verifying HTTPs connections +# (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# PEM encoded client certificate key file (string value) +#keyfile = + +# Verify HTTPS connections (boolean value) +#insecure = false + +# Timeout value for http requests (integer value) +#timeout = + +# Collect per-API call timing information (boolean value) +#collect_timing = false + +# Log requests to multiple loggers (boolean value) +#split_loggers = false + +# Authentication type to load (string value) +# Deprecated group/name - [neutron]/auth_plugin +#auth_type = + +# Config Section from which to load plugin specific options (string value) +#auth_section = + +# Authentication URL (string value) +#auth_url = + +# Scope for system operations (string value) +#system_scope = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + +# Project ID to scope to (string value) +#project_id = + +# Project name to scope to (string value) +#project_name = + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Trust ID (string value) +#trust_id = + +# Optional domain ID to use with v3 and v2 parameters. It will be used for both +# the user and project domain in v3 and ignored in v2 authentication (string +# value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. It will be used for +# both the user and project domain in v3 and ignored in v2 authentication +# (string value) +#default_domain_name = + +# User ID (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [neutron]/user_name +#username = + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User's password (string value) +#password = + +# Tenant ID (string value) +#tenant_id = + +# Tenant Name (string value) +#tenant_name = + +# The default service_type for endpoint URL discovery (string value) +#service_type = network + +# The default service_name for endpoint URL discovery (string value) +#service_name = + +# List of interfaces, in order of preference, for endpoint URL (list value) +#valid_interfaces = internal,public + +# The default region_name for endpoint URL discovery (string value) +#region_name = + +# Always use this endpoint URL for requests for this client. 
NOTE: The +# unversioned endpoint should be specified here; to request a particular API +# version, use the `version`, `min-version`, and/or `max-version` options +# (string value) +#endpoint_override = + +# The maximum number of retries that should be attempted for connection errors +# (integer value) +#connect_retries = + +# Delay (in seconds) between two retries for connection errors. If not set, +# exponential retry starting with 0.5 seconds up to a maximum of 60 seconds is +# used (floating point value) +#connect_retry_delay = + +# The maximum number of retries that should be attempted for retriable HTTP +# status codes (integer value) +#status_code_retries = + +# Delay (in seconds) between two retries for retriable status codes. If not set, +# exponential retry starting with 0.5 seconds up to a maximum of 60 seconds is +# used (floating point value) +#status_code_retry_delay = + + +[notifications] +# +# Most of the actions in Nova which manipulate the system state generate +# notifications which are posted to the messaging component (e.g. RabbitMQ) and +# can be consumed by any service outside the OpenStack. More technical details +# at https://docs.openstack.org/nova/latest/reference/notifications.html + +# +# From nova.conf +# + +# +# If set, send compute.instance.update notifications on +# instance state changes. For more information, refer to the documentation. +# (string value) +# Possible values: +# - no notifications +# vm_state - Notifications are sent with VM state transition information in the +# ``old_state`` and ``state`` fields. The ``old_task_state`` and +# ``new_task_state`` fields will be set to the current task_state of the +# instance +# vm_and_task_state - Notifications are sent with VM and task state transition +# information +#notify_on_state_change = + +# Default notification level for outgoing notifications (string value) +# Possible values: +# DEBUG - +# INFO - +# WARN - +# ERROR - +# CRITICAL - +# Deprecated group/name - [DEFAULT]/default_notification_level +#default_level = INFO + +# +# Specifies which notification format shall be emitted by nova. For more +# information, refer to the documentation. (string value) +# Possible values: +# both - Both the legacy unversioned and the new versioned notifications are +# emitted +# versioned - Only the new versioned notifications are emitted +# unversioned - Only the legacy unversioned notifications are emitted +#notification_format = unversioned + +# +# Specifies the topics for the versioned notifications issued by nova. For more +# information, refer to the documentation. (list value) +#versioned_notifications_topics = versioned_notifications + +# +# If enabled, include block device information in the versioned notification +# payload. Sending block device information is disabled by default as providing +# that information can incur some overhead on the system since the information +# may need to be loaded from the database. +# (boolean value) +#bdms_in_notifications = false + + +[oslo_concurrency] + +# +# From oslo.concurrency +# + +# Enables or disables inter-process locks (boolean value) +#disable_process_locking = false + +# Directory to use for lock files. For security, the specified directory should +# only be writable by the user running the processes that need locking. Defaults +# to environment variable OSLO_LOCK_PATH. If external locks are used, a lock +# path must be set (string value) +#lock_path = + + +[oslo_messaging_amqp] + +# +# From oslo.messaging +# + +# Name for the AMQP container. 
must be globally unique. Defaults to a generated +# UUID (string value) +#container_name = + +# Timeout for inactive connections (in seconds) (integer value) +#idle_timeout = 0 + +# Debug: dump AMQP frames to stdout (boolean value) +#trace = false + +# Attempt to connect via SSL. If no other ssl-related parameters are given, it +# will use the system's CA-bundle to verify the server's certificate (boolean +# value) +#ssl = false + +# CA certificate PEM file used to verify the server's certificate (string value) +#ssl_ca_file = + +# Self-identifying certificate PEM file for client authentication (string value) +#ssl_cert_file = + +# Private key PEM file used to sign ssl_cert_file certificate (optional) (string +# value) +#ssl_key_file = + +# Password for decrypting ssl_key_file (if encrypted) (string value) +#ssl_key_password = + +# By default SSL checks that the name in the server's certificate matches the +# hostname in the transport_url. In some configurations it may be preferable to +# use the virtual hostname instead, for example if the server uses the Server +# Name Indication TLS extension (rfc6066) to provide a certificate per virtual +# host. Set ssl_verify_vhost to True if the server's SSL certificate uses the +# virtual host name instead of the DNS name (boolean value) +#ssl_verify_vhost = false + +# Space separated list of acceptable SASL mechanisms (string value) +#sasl_mechanisms = + +# Path to directory that contains the SASL configuration (string value) +#sasl_config_dir = + +# Name of configuration file (without .conf suffix) (string value) +#sasl_config_name = + +# SASL realm to use if no realm present in username (string value) +#sasl_default_realm = + +# Seconds to pause before attempting to re-connect (integer value) +# Minimum value: 1 +#connection_retry_interval = 1 + +# Increase the connection_retry_interval by this many seconds after each +# unsuccessful failover attempt (integer value) +# Minimum value: 0 +#connection_retry_backoff = 2 + +# Maximum limit for connection_retry_interval + connection_retry_backoff +# (integer value) +# Minimum value: 1 +#connection_retry_interval_max = 30 + +# Time to pause between re-connecting an AMQP 1.0 link that failed due to a +# recoverable error (integer value) +# Minimum value: 1 +#link_retry_delay = 10 + +# The maximum number of attempts to re-send a reply message which failed due to +# a recoverable error (integer value) +# Minimum value: -1 +#default_reply_retry = 0 + +# The deadline for an rpc reply message delivery (integer value) +# Minimum value: 5 +#default_reply_timeout = 30 + +# The deadline for an rpc cast or call message delivery. Only used when caller +# does not provide a timeout expiry (integer value) +# Minimum value: 5 +#default_send_timeout = 30 + +# The deadline for a sent notification message delivery. Only used when caller +# does not provide a timeout expiry (integer value) +# Minimum value: 5 +#default_notify_timeout = 30 + +# The duration to schedule a purge of idle sender links. Detach link after +# expiry (integer value) +# Minimum value: 1 +#default_sender_link_timeout = 600 + +# Indicates the addressing mode used by the driver. 
+# Permitted values: +# 'legacy' - use legacy non-routable addressing +# 'routable' - use routable addresses +# 'dynamic' - use legacy addresses if the message bus does not support routing +# otherwise use routable addressing (string value) +#addressing_mode = dynamic + +# Enable virtual host support for those message buses that do not natively +# support virtual hosting (such as qpidd). When set to true the virtual host +# name will be added to all message bus addresses, effectively creating a +# private 'subnet' per virtual host. Set to False if the message bus supports +# virtual hosting using the 'hostname' field in the AMQP 1.0 Open performative +# as the name of the virtual host (boolean value) +#pseudo_vhost = true + +# address prefix used when sending to a specific server (string value) +#server_request_prefix = exclusive + +# address prefix used when broadcasting to all servers (string value) +#broadcast_prefix = broadcast + +# address prefix when sending to any server in group (string value) +#group_request_prefix = unicast + +# Address prefix for all generated RPC addresses (string value) +#rpc_address_prefix = openstack.org/om/rpc + +# Address prefix for all generated Notification addresses (string value) +#notify_address_prefix = openstack.org/om/notify + +# Appended to the address prefix when sending a fanout message. Used by the +# message bus to identify fanout messages (string value) +#multicast_address = multicast + +# Appended to the address prefix when sending to a particular RPC/Notification +# server. Used by the message bus to identify messages sent to a single +# destination (string value) +#unicast_address = unicast + +# Appended to the address prefix when sending to a group of consumers. Used by +# the message bus to identify messages that should be delivered in a round-robin +# fashion across consumers (string value) +#anycast_address = anycast + +# Exchange name used in notification addresses. +# Exchange name resolution precedence: +# Target.exchange if set +# else default_notification_exchange if set +# else control_exchange if set +# else 'notify' (string value) +#default_notification_exchange = + +# Exchange name used in RPC addresses. +# Exchange name resolution precedence: +# Target.exchange if set +# else default_rpc_exchange if set +# else control_exchange if set +# else 'rpc' (string value) +#default_rpc_exchange = + +# Window size for incoming RPC Reply messages (integer value) +# Minimum value: 1 +#reply_link_credit = 200 + +# Window size for incoming RPC Request messages (integer value) +# Minimum value: 1 +#rpc_server_credit = 100 + +# Window size for incoming Notification messages (integer value) +# Minimum value: 1 +#notify_server_credit = 100 + +# Send messages of this type pre-settled. +# Pre-settled messages will not receive acknowledgement +# from the peer. Note well: pre-settled messages may be +# silently discarded if the delivery fails. 
+# Permitted values: +# 'rpc-call' - send RPC Calls pre-settled +# 'rpc-reply'- send RPC Replies pre-settled +# 'rpc-cast' - Send RPC Casts pre-settled +# 'notify' - Send Notifications pre-settled +# (multi valued) +#pre_settled = rpc-cast +#pre_settled = rpc-reply + + +[oslo_messaging_kafka] + +# +# From oslo.messaging +# + +# Max fetch bytes of Kafka consumer (integer value) +#kafka_max_fetch_bytes = 1048576 + +# Default timeout(s) for Kafka consumers (floating point value) +#kafka_consumer_timeout = 1.0 + +# DEPRECATED: Pool Size for Kafka Consumers (integer value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Driver no longer uses connection pool. +#pool_size = 10 + +# DEPRECATED: The pool size limit for connections expiration policy (integer +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Driver no longer uses connection pool. +#conn_pool_min_size = 2 + +# DEPRECATED: The time-to-live in sec of idle connections in the pool (integer +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Driver no longer uses connection pool. +#conn_pool_ttl = 1200 + +# Group id for Kafka consumer. Consumers in one group will coordinate message +# consumption (string value) +#consumer_group = oslo_messaging_consumer + +# Upper bound on the delay for KafkaProducer batching in seconds (floating point +# value) +#producer_batch_timeout = 0.0 + +# Size of batch for the producer async send (integer value) +#producer_batch_size = 16384 + +# The compression codec for all data generated by the producer. If not set, +# compression will not be used. Note that the allowed values of this depend on +# the kafka version (string value) +# Possible values: +# none - +# gzip - +# snappy - +# lz4 - +# zstd - +#compression_codec = none + +# Enable asynchronous consumer commits (boolean value) +#enable_auto_commit = false + +# The maximum number of records returned in a poll call (integer value) +#max_poll_records = 500 + +# Protocol used to communicate with brokers (string value) +# Possible values: +# PLAINTEXT - +# SASL_PLAINTEXT - +# SSL - +# SASL_SSL - +#security_protocol = PLAINTEXT + +# Mechanism when security protocol is SASL (string value) +#sasl_mechanism = PLAIN + +# CA certificate PEM file used to verify the server certificate (string value) +#ssl_cafile = + +# Client certificate PEM file used for authentication (string value) +#ssl_client_cert_file = + +# Client key PEM file used for authentication (string value) +#ssl_client_key_file = + +# Client key password file used for authentication (string value) +#ssl_client_key_password = + + +[oslo_messaging_notifications] + +# +# From oslo.messaging +# + +# The Drivers(s) to handle sending notifications. Possible values are messaging, +# messagingv2, routing, log, test, noop (multi valued) +# Deprecated group/name - [DEFAULT]/notification_driver +#driver = + +# A URL representing the messaging driver to use for notifications. 
If not set, +# we fall back to the same configuration used for RPC (string value) +# Deprecated group/name - [DEFAULT]/notification_transport_url +#transport_url = + +# AMQP topic used for OpenStack notifications (list value) +# Deprecated group/name - [rpc_notifier2]/topics +# Deprecated group/name - [DEFAULT]/notification_topics +#topics = notifications + +# The maximum number of attempts to re-send a notification message which failed +# to be delivered due to a recoverable error. 0 - No retry, -1 - indefinite +# (integer value) +#retry = -1 + + +[oslo_messaging_rabbit] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP (boolean value) +#amqp_durable_queues = false + +# Auto-delete queues in AMQP (boolean value) +#amqp_auto_delete = false + +# Connect over SSL (boolean value) +# Deprecated group/name - [oslo_messaging_rabbit]/rabbit_use_ssl +#ssl = false + +# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and +# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some +# distributions (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version +#ssl_version = + +# SSL key file (valid only if SSL enabled) (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile +#ssl_key_file = + +# SSL cert file (valid only if SSL enabled) (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile +#ssl_cert_file = + +# SSL certification authority file (valid only if SSL enabled) (string value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs +#ssl_ca_file = + +# DEPRECATED: Run the health check heartbeat thread through a native python +# thread by default. If this option is equal to False then the health check +# heartbeat will inherit the execution model from the parent process. For +# example if the parent process has monkey patched the stdlib by using +# eventlet/greenlet then the heartbeat will be run through a green thread +# (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#heartbeat_in_pthread = true + +# How long to wait before reconnecting in response to an AMQP consumer cancel +# notification (floating point value) +#kombu_reconnect_delay = 1.0 + +# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not +# be used. This option may not be available in future versions (string value) +#kombu_compression = + +# How long to wait a missing client before abandoning to send it its replies. +# This value should not be longer than rpc_response_timeout (integer value) +# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout +#kombu_missing_consumer_retry_timeout = 60 + +# Determines how the next RabbitMQ node is chosen in case the one we are +# currently connected to becomes unavailable. Takes effect only if more than one +# RabbitMQ node is provided in config (string value) +# Possible values: +# round-robin - +# shuffle - +#kombu_failover_strategy = round-robin + +# The RabbitMQ login method (string value) +# Possible values: +# PLAIN - +# AMQPLAIN - +# RABBIT-CR-DEMO - +#rabbit_login_method = AMQPLAIN + +# How frequently to retry connecting with RabbitMQ (integer value) +#rabbit_retry_interval = 1 + +# How long to backoff for between retries when connecting to RabbitMQ (integer +# value) +#rabbit_retry_backoff = 2 + +# Maximum interval of RabbitMQ connection retries. 
Default is 30 seconds +# (integer value) +#rabbit_interval_max = 30 + +# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this +# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring +# is no longer controlled by the x-ha-policy argument when declaring a queue. If +# you just want to make sure that all queues (except those with auto-generated +# names) are mirrored across all nodes, run: "rabbitmqctl set_policy HA +# '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value) +#rabbit_ha_queues = false + +# Positive integer representing duration in seconds for queue TTL (x-expires). +# Queues which are unused for the duration of the TTL are automatically deleted. +# The parameter affects only reply and fanout queues (integer value) +# Minimum value: 1 +#rabbit_transient_queues_ttl = 1800 + +# Specifies the number of messages to prefetch. Setting to zero allows unlimited +# messages (integer value) +#rabbit_qos_prefetch_count = 0 + +# Number of seconds after which the Rabbit broker is considered down if +# heartbeat's keep-alive fails (0 disables heartbeat) (integer value) +#heartbeat_timeout_threshold = 60 + +# How often times during the heartbeat_timeout_threshold we check the heartbeat +# (integer value) +#heartbeat_rate = 2 + +# DEPRECATED: (DEPRECATED) Enable/Disable the RabbitMQ mandatory flag for direct +# send. The direct send is used as reply, so the MessageUndeliverable exception +# is raised in case the client queue does not exist.MessageUndeliverable +# exception will be used to loop for a timeout to lets a chance to sender to +# recover.This flag is deprecated and it will not be possible to deactivate this +# functionality anymore (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Mandatory flag no longer deactivable. +#direct_mandatory_flag = true + +# Enable x-cancel-on-ha-failover flag so that rabbitmq server will cancel and +# notify consumerswhen queue is down (boolean value) +#enable_cancel_on_failover = false + + +[oslo_middleware] + +# +# From oslo.middleware +# + +# The maximum body size for each request, in bytes (integer value) +# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size +# Deprecated group/name - [DEFAULT]/max_request_body_size +#max_request_body_size = 114688 + +# DEPRECATED: The HTTP Header that will be used to determine what the original +# request protocol scheme was, even if it was hidden by a SSL termination proxy +# (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#secure_proxy_ssl_header = X-Forwarded-Proto + +# Whether the application is behind a proxy or not. This determines if the +# middleware should parse the headers or not (boolean value) +#enable_proxy_headers_parsing = false + + +[oslo_policy] + +# +# From oslo.policy +# + +# This option controls whether or not to enforce scope when evaluating policies. +# If ``True``, the scope of the token used in the request is compared to the +# ``scope_types`` of the policy being enforced. If the scopes do not match, an +# ``InvalidScope`` exception will be raised. If ``False``, a message will be +# logged informing operators that policies are being invoked with mismatching +# scope (boolean value) +#enforce_scope = false + +# This option controls whether or not to use old deprecated defaults when +# evaluating policies. If ``True``, the old deprecated defaults are not going to +# be evaluated. 
This means if any existing token is allowed for old defaults but +# is disallowed for new defaults, it will be disallowed. It is encouraged to +# enable this flag along with the ``enforce_scope`` flag so that you can get the +# benefits of new defaults and ``scope_type`` together (boolean value) +#enforce_new_defaults = false + +# The relative or absolute path of a file that maps roles to permissions for a +# given service. Relative paths must be specified in relation to the +# configuration file setting this option (string value) +#policy_file = policy.yaml + +# Default rule. Enforced when a requested rule is not found (string value) +#policy_default_rule = default + +# Directories where policy configuration files are stored. They can be relative +# to any directory in the search path defined by the config_dir option, or +# absolute paths. The file defined by policy_file must exist for these +# directories to be searched. Missing or empty directories are ignored (multi +# valued) +#policy_dirs = policy.d + +# Content Type to send and receive data for REST based policy check (string +# value) +# Possible values: +# application/x-www-form-urlencoded - +# application/json - +#remote_content_type = application/x-www-form-urlencoded + +# server identity verification for REST based policy check (boolean value) +#remote_ssl_verify_server_crt = false + +# Absolute path to ca cert file for REST based policy check (string value) +#remote_ssl_ca_crt_file = + +# Absolute path to client cert for REST based policy check (string value) +#remote_ssl_client_crt_file = + +# Absolute path client key file REST based policy check (string value) +#remote_ssl_client_key_file = + + +[pci] + +# +# From nova.conf +# + +# +# An alias for a PCI passthrough device requirement. For more information, refer +# to the documentation. (multi valued) +# Deprecated group/name - [DEFAULT]/pci_alias +#alias = + +# +# White list of PCI devices available to VMs. For more information, refer to the +# documentation. 
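+# For illustration only (the IDs below are placeholders): a device class is
+# most commonly exposed by vendor and product ID, e.g.
+#   passthrough_whitelist = { "vendor_id": "8086", "product_id": "1520" }
+# Entries keyed by PCI address or device name are also accepted.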
(multi valued) +# Deprecated group/name - [DEFAULT]/pci_passthrough_whitelist +#passthrough_whitelist = + + +[placement] +{% if identity_service.internal_host -%} +auth_url = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }} +auth_type = password +project_domain_name = {{ identity_service.service_domain_name }} +user_domain_name = {{ identity_service.service_domain_name }} +project_name = {{ identity_service.service_project_name }} +username = {{ identity_service.service_user_name }} +password = {{ identity_service.service_password }} +# XXX Region should come from the id relation here +region = {{ options.region }} +{% endif -%} + +# +# From nova.conf +# + +# PEM encoded Certificate Authority to use when verifying HTTPs connections +# (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# PEM encoded client certificate key file (string value) +#keyfile = + +# Verify HTTPS connections (boolean value) +#insecure = false + +# Timeout value for http requests (integer value) +#timeout = + +# Collect per-API call timing information (boolean value) +#collect_timing = false + +# Log requests to multiple loggers (boolean value) +#split_loggers = false + +# Authentication type to load (string value) +# Deprecated group/name - [placement]/auth_plugin +#auth_type = + +# Config Section from which to load plugin specific options (string value) +#auth_section = + +# Authentication URL (string value) +#auth_url = + +# Scope for system operations (string value) +#system_scope = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + +# Project ID to scope to (string value) +#project_id = + +# Project name to scope to (string value) +#project_name = + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Trust ID (string value) +#trust_id = + +# Optional domain ID to use with v3 and v2 parameters. It will be used for both +# the user and project domain in v3 and ignored in v2 authentication (string +# value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. It will be used for +# both the user and project domain in v3 and ignored in v2 authentication +# (string value) +#default_domain_name = + +# User ID (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [placement]/user_name +#username = + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User's password (string value) +#password = + +# Tenant ID (string value) +#tenant_id = + +# Tenant Name (string value) +#tenant_name = + +# The default service_type for endpoint URL discovery (string value) +#service_type = placement + +# The default service_name for endpoint URL discovery (string value) +#service_name = + +# List of interfaces, in order of preference, for endpoint URL (list value) +#valid_interfaces = internal,public + +# The default region_name for endpoint URL discovery (string value) +#region_name = + +# Always use this endpoint URL for requests for this client. 
NOTE: The +# unversioned endpoint should be specified here; to request a particular API +# version, use the `version`, `min-version`, and/or `max-version` options +# (string value) +#endpoint_override = + +# The maximum number of retries that should be attempted for connection errors +# (integer value) +#connect_retries = + +# Delay (in seconds) between two retries for connection errors. If not set, +# exponential retry starting with 0.5 seconds up to a maximum of 60 seconds is +# used (floating point value) +#connect_retry_delay = + +# The maximum number of retries that should be attempted for retriable HTTP +# status codes (integer value) +#status_code_retries = + +# Delay (in seconds) between two retries for retriable status codes. If not set, +# exponential retry starting with 0.5 seconds up to a maximum of 60 seconds is +# used (floating point value) +#status_code_retry_delay = + + +[powervm] +# +# PowerVM options allow cloud administrators to configure how OpenStack will +# work +# with the PowerVM hypervisor. + +# +# From nova.conf +# + +# +# Factor used to calculate the amount of physical processor compute power given +# to each vCPU. E.g. A value of 1.0 means a whole physical processor, whereas +# 0.05 means 1/20th of a physical processor. +# (floating point value) +# Minimum value: 0.05 +# Maximum value: 1 +#proc_units_factor = 0.1 + +# +# The disk driver to use for PowerVM disks. PowerVM provides support for +# localdisk and PowerVM Shared Storage Pool disk drivers. For more information, +# refer to the documentation. (string value) +# Possible values: +# localdisk - +# ssp - +#disk_driver = localdisk + +# +# Volume Group to use for block device operations. If disk_driver is localdisk, +# then this attribute must be specified. It is strongly recommended NOT to use +# rootvg since that is used by the management partition and filling it will +# cause +# failures. +# (string value) +#volume_group_name = + + +[privsep] +# Configuration options for the oslo.privsep daemon. Note that this group name +# can be changed by the consuming service. Check the service's docs to see if +# this is the case. + +# +# From oslo.privsep +# + +# User that the privsep daemon should run as (string value) +#user = + +# Group that the privsep daemon should run as (string value) +#group = + +# List of Linux capabilities retained by the privsep daemon (list value) +#capabilities = + +# The number of threads available for privsep to concurrently run processes. +# Defaults to the number of CPU cores in the system (integer value) +# Minimum value: 1 +# +# This option has a sample default set, which means that +# its actual default value may vary from the one documented +# below. +#thread_pool_size = multiprocessing.cpu_count() + +# Command to invoke to start the privsep daemon if not using the "fork" method. +# If not specified, a default is generated using "sudo privsep-helper" and +# arguments designed to recreate the current configuration. This command must +# accept suitable --privsep_context and --privsep_sock_path arguments (string +# value) +#helper_command = + + +[profiler] + +# +# From osprofiler +# + +# +# Enable the profiling for all services on this node. For more information, +# refer to the documentation. (boolean value) +# Deprecated group/name - [profiler]/profiler_enabled +#enabled = false + +# +# Enable SQL requests profiling in services. For more information, refer to the +# documentation. 
(boolean value) +#trace_sqlalchemy = false + +# +# Secret key(s) to use for encrypting context data for performance profiling. +# For more information, refer to the documentation. (string value) +#hmac_keys = SECRET_KEY + +# +# Connection string for a notifier backend. For more information, refer to the +# documentation. (string value) +#connection_string = messaging:// + +# +# Document type for notification indexing in elasticsearch. +# (string value) +#es_doc_type = notification + +# +# This parameter is a time value parameter (for example: es_scroll_time=2m), +# indicating for how long the nodes that participate in the search will maintain +# relevant resources in order to continue and support it. +# (string value) +#es_scroll_time = 2m + +# +# Elasticsearch splits large requests in batches. This parameter defines +# maximum size of each batch (for example: es_scroll_size=10000). +# (integer value) +#es_scroll_size = 10000 + +# +# Redissentinel provides a timeout option on the connections. +# This parameter defines that timeout (for example: socket_timeout=0.1). +# (floating point value) +#socket_timeout = 0.1 + +# +# Redissentinel uses a service name to identify a master redis service. +# This parameter defines the name (for example: +# ``sentinal_service_name=mymaster``). +# (string value) +#sentinel_service_name = mymaster + +# +# Enable filter traces that contain error/exception to a separated place. For +# more information, refer to the documentation. (boolean value) +#filter_error_trace = false + + +[quota] +# +# Quota options allow to manage quotas in openstack deployment. + +# +# From nova.conf +# + +# +# The number of instances allowed per project. For more information, refer to +# the documentation. (integer value) +# Minimum value: -1 +# Deprecated group/name - [DEFAULT]/quota_instances +#instances = 10 + +# +# The number of instance cores or vCPUs allowed per project. For more +# information, refer to the documentation. (integer value) +# Minimum value: -1 +# Deprecated group/name - [DEFAULT]/quota_cores +#cores = 20 + +# +# The number of megabytes of instance RAM allowed per project. For more +# information, refer to the documentation. (integer value) +# Minimum value: -1 +# Deprecated group/name - [DEFAULT]/quota_ram +#ram = 51200 + +# +# The number of metadata items allowed per instance. For more information, refer +# to the documentation. (integer value) +# Minimum value: -1 +# Deprecated group/name - [DEFAULT]/quota_metadata_items +#metadata_items = 128 + +# +# The number of injected files allowed. For more information, refer to the +# documentation. (integer value) +# Minimum value: -1 +# Deprecated group/name - [DEFAULT]/quota_injected_files +#injected_files = 5 + +# +# The number of bytes allowed per injected file. For more information, refer to +# the documentation. (integer value) +# Minimum value: -1 +# Deprecated group/name - [DEFAULT]/quota_injected_file_content_bytes +#injected_file_content_bytes = 10240 + +# +# The maximum allowed injected file path length. For more information, refer to +# the documentation. (integer value) +# Minimum value: -1 +# Deprecated group/name - [DEFAULT]/quota_injected_file_path_length +#injected_file_path_length = 255 + +# +# The maximum number of key pairs allowed per user. For more information, refer +# to the documentation. (integer value) +# Minimum value: -1 +# Deprecated group/name - [DEFAULT]/quota_key_pairs +#key_pairs = 100 + +# +# The maxiumum number of server groups per project. 
For more information, refer +# to the documentation. (integer value) +# Minimum value: -1 +# Deprecated group/name - [DEFAULT]/quota_server_groups +#server_groups = 10 + +# +# The maximum number of servers per server group. For more information, refer to +# the documentation. (integer value) +# Minimum value: -1 +# Deprecated group/name - [DEFAULT]/quota_server_group_members +#server_group_members = 10 + +# +# Provides abstraction for quota checks. Users can configure a specific +# driver to use for quota checks. +# (string value) +# Possible values: +# nova.quota.DbQuotaDriver - Stores quota limit information in the database and +# relies on the ``quota_*`` configuration options for default quota limit +# values. Counts quota usage on-demand. +# nova.quota.NoopQuotaDriver - Ignores quota and treats all resources as +# unlimited. +#driver = nova.quota.DbQuotaDriver + +# +# Recheck quota after resource creation to prevent allowing quota to be +# exceeded. For more information, refer to the documentation. (boolean value) +#recheck_quota = true + +# +# Enable the counting of quota usage from the placement service. For more +# information, refer to the documentation. (boolean value) +#count_usage_from_placement = false + + +[rdp] +# +# Options under this group enable and configure Remote Desktop Protocol ( +# RDP) related features. +# +# This group is only relevant to Hyper-V users. + +# +# From nova.conf +# + +# +# Enable Remote Desktop Protocol (RDP) related features. For more information, +# refer to the documentation. (boolean value) +#enabled = false + +# +# The URL an end user would use to connect to the RDP HTML5 console proxy. +# The console proxy service is called with this token-embedded URL and +# establishes the connection to the proper instance. For more information, refer +# to the documentation. (uri value) +#html5_proxy_base_url = http://127.0.0.1:6083/ + + +[remote_debug] + +# +# From nova.conf +# + +# +# Debug host (IP or name) to connect to. For more information, refer to the +# documentation. (host address value) +#host = + +# +# Debug port to connect to. For more information, refer to the documentation. +# (port value) +# Minimum value: 0 +# Maximum value: 65535 +#port = + + +[scheduler] + +# +# From nova.conf +# + +# +# The maximum number of schedule attempts. For more information, refer to the +# documentation. (integer value) +# Minimum value: 1 +# Deprecated group/name - [DEFAULT]/scheduler_max_attempts +#max_attempts = 3 + +# +# Periodic task interval. For more information, refer to the documentation. +# (integer value) +# Minimum value: -1 +#discover_hosts_in_cells_interval = -1 + +# +# The maximum number of placement results to request. For more information, +# refer to the documentation. (integer value) +# Minimum value: 1 +#max_placement_results = 1000 + +# +# Number of workers for the nova-scheduler service. For more information, refer +# to the documentation. (integer value) +# Minimum value: 0 +#workers = + +# +# Enable the scheduler to filter compute hosts affined to routed network segment +# aggregates. For more information, refer to the documentation. (boolean value) +#query_placement_for_routed_network_aggregates = false + +# +# Restrict tenants to specific placement aggregates. For more information, refer +# to the documentation. (boolean value) +#limit_tenants_to_placement_aggregate = false + +# +# Require a placement aggregate association for all tenants. For more +# information, refer to the documentation. 
(boolean value) +#placement_aggregate_required_for_tenants = false + +# +# Use placement to determine availability zones. For more information, refer to +# the documentation. (boolean value) +#query_placement_for_availability_zone = false + +# +# Use placement to determine host support for the instance's image type. For +# more information, refer to the documentation. (boolean value) +#query_placement_for_image_type_support = false + +# +# Restrict use of aggregates to instances with matching metadata. For more +# information, refer to the documentation. (boolean value) +#enable_isolated_aggregate_filtering = false + +# +# Use placement to filter hosts based on image metadata. For more information, +# refer to the documentation. (boolean value) +#image_metadata_prefilter = false + + +[serial_console] +# +# The serial console feature allows you to connect to a guest in case a +# graphical console like VNC, RDP or SPICE is not available. This is only +# currently supported for the libvirt, Ironic and hyper-v drivers. + +# +# From nova.conf +# + +# +# Enable the serial console feature. For more information, refer to the +# documentation. (boolean value) +#enabled = false + +# +# A range of TCP ports a guest can use for its backend. For more information, +# refer to the documentation. (string value) +#port_range = 10000:20000 + +# +# The URL an end user would use to connect to the ``nova-serialproxy`` service. +# For more information, refer to the documentation. (uri value) +#base_url = ws://127.0.0.1:6083/ + +# +# The IP address to which proxy clients (like ``nova-serialproxy``) should +# connect to get the serial console of an instance. For more information, refer +# to the documentation. (string value) +#proxyclient_address = 127.0.0.1 + +# +# The IP address which is used by the ``nova-serialproxy`` service to listen +# for incoming requests. For more information, refer to the documentation. +# (string value) +#serialproxy_host = 0.0.0.0 + +# +# The port number which is used by the ``nova-serialproxy`` service to listen +# for incoming requests. For more information, refer to the documentation. (port +# value) +# Minimum value: 0 +# Maximum value: 65535 +#serialproxy_port = 6083 + + +[service_user] +# +# Configuration options for service to service authentication using a service +# token. These options allow sending a service token along with the user's token +# when contacting external REST APIs. + +# +# From nova.conf +# + +# +# When True, if sending a user token to a REST API, also send a service token. +# For more information, refer to the documentation. 
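+# For illustration only: enabling this also requires the service credentials
+# below to be filled in (typically ``auth_type = password`` plus ``auth_url``,
+# ``username``, ``password`` and the project/user domain options), all of
+# which are placeholders until set by the operator.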
(boolean value) +#send_service_user_token = false + +# PEM encoded Certificate Authority to use when verifying HTTPs connections +# (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# PEM encoded client certificate key file (string value) +#keyfile = + +# Verify HTTPS connections (boolean value) +#insecure = false + +# Timeout value for http requests (integer value) +#timeout = + +# Collect per-API call timing information (boolean value) +#collect_timing = false + +# Log requests to multiple loggers (boolean value) +#split_loggers = false + +# Authentication type to load (string value) +# Deprecated group/name - [service_user]/auth_plugin +#auth_type = + +# Config Section from which to load plugin specific options (string value) +#auth_section = + +# Authentication URL (string value) +#auth_url = + +# Scope for system operations (string value) +#system_scope = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + +# Project ID to scope to (string value) +#project_id = + +# Project name to scope to (string value) +#project_name = + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Trust ID (string value) +#trust_id = + +# Optional domain ID to use with v3 and v2 parameters. It will be used for both +# the user and project domain in v3 and ignored in v2 authentication (string +# value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. It will be used for +# both the user and project domain in v3 and ignored in v2 authentication +# (string value) +#default_domain_name = + +# User ID (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [service_user]/user_name +#username = + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User's password (string value) +#password = + +# Tenant ID (string value) +#tenant_id = + +# Tenant Name (string value) +#tenant_name = + + +[spice] +# +# SPICE console feature allows you to connect to a guest virtual machine. +# SPICE is a replacement for fairly limited VNC protocol. +# +# Following requirements must be met in order to use SPICE: +# +# * Virtualization driver must be libvirt +# * spice.enabled set to True +# * vnc.enabled set to False +# * update html5proxy_base_url +# * update server_proxyclient_address + +# +# From nova.conf +# + +# +# Enable SPICE related features. For more information, refer to the +# documentation. (boolean value) +#enabled = false + +# +# Enable the SPICE guest agent support on the instances. For more information, +# refer to the documentation. (boolean value) +#agent_enabled = true + +# +# Location of the SPICE HTML5 console proxy. For more information, refer to the +# documentation. (uri value) +#html5proxy_base_url = http://127.0.0.1:6082/spice_auto.html + +# +# The address where the SPICE server running on the instances should listen. +# For more information, refer to the documentation. (string value) +#server_listen = 127.0.0.1 + +# +# The address used by ``nova-spicehtml5proxy`` client to connect to instance +# console. For more information, refer to the documentation. (string value) +#server_proxyclient_address = 127.0.0.1 + +# +# IP address or a hostname on which the ``nova-spicehtml5proxy`` service +# listens for incoming requests. 
For more information, refer to the +# documentation. (host address value) +#html5proxy_host = 0.0.0.0 + +# +# Port on which the ``nova-spicehtml5proxy`` service listens for incoming +# requests. For more information, refer to the documentation. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#html5proxy_port = 6082 + + +[upgrade_levels] +# +# upgrade_levels options are used to set version cap for RPC +# messages sent between different nova services. +# +# By default all services send messages using the latest version +# they know about. +# +# The compute upgrade level is an important part of rolling upgrades +# where old and new nova-compute services run side by side. +# +# The other options can largely be ignored, and are only kept to +# help with a possible future backport issue. + +# +# From nova.conf +# + +# +# Compute RPC API version cap. For more information, refer to the documentation. +# (string value) +#compute = + +# DEPRECATED: +# Cert RPC API version cap. For more information, refer to the documentation. +# (string value) +# This option is deprecated for removal since 18.0.0. +# Its value may be silently ignored in the future. +# Reason: +# The nova-cert service was removed in 16.0.0 (Pike) so this option +# is no longer used. +#cert = + +# +# Scheduler RPC API version cap. For more information, refer to the +# documentation. (string value) +#scheduler = + +# +# Conductor RPC API version cap. For more information, refer to the +# documentation. (string value) +#conductor = + +# +# Base API RPC API version cap. For more information, refer to the +# documentation. (string value) +#baseapi = + + +[vault] + +# +# From nova.conf +# + +# root token for vault (string value) +#root_token_id = + +# AppRole role_id for authentication with vault (string value) +#approle_role_id = + +# AppRole secret_id for authentication with vault (string value) +#approle_secret_id = + +# Mountpoint of KV store in Vault to use, for example: secret (string value) +#kv_mountpoint = secret + +# Version of KV store in Vault to use, for example: 2 (integer value) +#kv_version = 2 + +# Use this endpoint to connect to Vault, for example: "http://127.0.0.1:8200" +# (string value) +#vault_url = http://127.0.0.1:8200 + +# Absolute path to ca cert file (string value) +#ssl_ca_crt_file = + +# SSL Enabled/Disabled (boolean value) +#use_ssl = false + + +[vendordata_dynamic_auth] +# +# Options within this group control the authentication of the vendordata +# subsystem of the metadata API server (and config drive) with external systems. 
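+#
+# A minimal illustrative sketch (every value below is a placeholder): when the
+# metadata API is configured with dynamic vendordata providers, this section
+# usually carries ordinary Keystone password credentials, e.g.
+#
+#   auth_type = password
+#   auth_url = http://keystone.example.com:5000/v3
+#   username = nova
+#   password = <service password>
+#   project_name = services
+#   user_domain_name = Default
+#   project_domain_name = Default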
+ +# +# From nova.conf +# + +# PEM encoded Certificate Authority to use when verifying HTTPs connections +# (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# PEM encoded client certificate key file (string value) +#keyfile = + +# Verify HTTPS connections (boolean value) +#insecure = false + +# Timeout value for http requests (integer value) +#timeout = + +# Collect per-API call timing information (boolean value) +#collect_timing = false + +# Log requests to multiple loggers (boolean value) +#split_loggers = false + +# Authentication type to load (string value) +# Deprecated group/name - [vendordata_dynamic_auth]/auth_plugin +#auth_type = + +# Config Section from which to load plugin specific options (string value) +#auth_section = + +# Authentication URL (string value) +#auth_url = + +# Scope for system operations (string value) +#system_scope = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + +# Project ID to scope to (string value) +#project_id = + +# Project name to scope to (string value) +#project_name = + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Trust ID (string value) +#trust_id = + +# Optional domain ID to use with v3 and v2 parameters. It will be used for both +# the user and project domain in v3 and ignored in v2 authentication (string +# value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. It will be used for +# both the user and project domain in v3 and ignored in v2 authentication +# (string value) +#default_domain_name = + +# User ID (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [vendordata_dynamic_auth]/user_name +#username = + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User's password (string value) +#password = + +# Tenant ID (string value) +#tenant_id = + +# Tenant Name (string value) +#tenant_name = + + +[vmware] +# +# Related options: +# Following options must be set in order to launch VMware-based +# virtual machines. +# +# * compute_driver: Must use vmwareapi.VMwareVCDriver. +# * vmware.host_username +# * vmware.host_password +# * vmware.cluster_name + +# +# From nova.conf +# + +# +# This option should be configured only when using the NSX-MH Neutron +# plugin. This is the name of the integration bridge on the ESXi server +# or host. This should not be set for any other Neutron plugin. Hence +# the default value is not set. For more information, refer to the +# documentation. (string value) +#integration_bridge = + +# +# Set this value if affected by an increased network latency causing +# repeated characters when typing in a remote console. +# (integer value) +# Minimum value: 0 +#console_delay_seconds = + +# +# Identifies the remote system where the serial port traffic will +# be sent. For more information, refer to the documentation. (string value) +#serial_port_service_uri = + +# +# Identifies a proxy service that provides network access to the +# serial_port_service_uri. For more information, refer to the documentation. +# (uri value) +#serial_port_proxy_uri = + +# +# Specifies the directory where the Virtual Serial Port Concentrator is +# storing console log files. It should match the 'serial_log_dir' config +# value of VSPC. 
+# (string value) +#serial_log_dir = /opt/vmware/vspc + +# +# Hostname or IP address for connection to VMware vCenter host (host address +# value) +#host_ip = + +# Port for connection to VMware vCenter host (port value) +# Minimum value: 0 +# Maximum value: 65535 +#host_port = 443 + +# Username for connection to VMware vCenter host (string value) +#host_username = + +# Password for connection to VMware vCenter host (string value) +#host_password = + +# +# Specifies the CA bundle file to be used in verifying the vCenter +# server certificate. +# (string value) +#ca_file = + +# +# If true, the vCenter server certificate is not verified. If false, +# then the default CA truststore is used for verification. For more information, +# refer to the documentation. (boolean value) +#insecure = false + +# Name of a VMware Cluster ComputeResource (string value) +#cluster_name = + +# +# Regular expression pattern to match the name of datastore. For more +# information, refer to the documentation. (string value) +#datastore_regex = + +# +# Time interval in seconds to poll remote tasks invoked on +# VMware VC server. +# (floating point value) +#task_poll_interval = 0.5 + +# +# Number of times VMware vCenter server API must be retried on connection +# failures, e.g. socket error, etc. +# (integer value) +# Minimum value: 0 +#api_retry_count = 10 + +# +# This option specifies VNC starting port. For more information, refer to the +# documentation. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#vnc_port = 5900 + +# +# Total number of VNC ports. +# (integer value) +# Minimum value: 0 +#vnc_port_total = 10000 + +# +# Keymap for VNC. For more information, refer to the documentation. (string +# value) +#vnc_keymap = en-us + +# +# This option enables/disables the use of linked clone. For more information, +# refer to the documentation. (boolean value) +#use_linked_clone = true + +# +# This option sets the http connection pool size. For more information, refer to +# the documentation. (integer value) +# Minimum value: 10 +#connection_pool_size = 10 + +# +# This option enables or disables storage policy based placement +# of instances. For more information, refer to the documentation. (boolean +# value) +#pbm_enabled = false + +# +# This option specifies the PBM service WSDL file location URL. For more +# information, refer to the documentation. (string value) +#pbm_wsdl_location = + +# +# This option specifies the default policy to be used. For more information, +# refer to the documentation. (string value) +#pbm_default_policy = + +# +# This option specifies the limit on the maximum number of objects to +# return in a single result. For more information, refer to the documentation. +# (integer value) +# Minimum value: 0 +#maximum_objects = 100 + +# +# This option adds a prefix to the folder where cached images are stored. For +# more information, refer to the documentation. (string value) +#cache_prefix = + + +[vnc] +# +# Virtual Network Computer (VNC) can be used to provide remote desktop +# console access to instances for tenants and/or administrators. + +# +# From nova.conf +# + +# +# Enable VNC related features. For more information, refer to the documentation. +# (boolean value) +# Deprecated group/name - [DEFAULT]/vnc_enabled +#enabled = true + +# +# The IP address or hostname on which an instance should listen to for +# incoming VNC connection requests on this node. 
+# (host address value) +# Deprecated group/name - [DEFAULT]/vncserver_listen +# Deprecated group/name - [vnc]/vncserver_listen +#server_listen = 127.0.0.1 + +# +# Private, internal IP address or hostname of VNC console proxy. For more +# information, refer to the documentation. (host address value) +# Deprecated group/name - [DEFAULT]/vncserver_proxyclient_address +# Deprecated group/name - [vnc]/vncserver_proxyclient_address +#server_proxyclient_address = 127.0.0.1 + +# +# Public address of noVNC VNC console proxy. For more information, refer to the +# documentation. (uri value) +#novncproxy_base_url = http://127.0.0.1:6080/vnc_auto.html + +# +# IP address that the noVNC console proxy should bind to. For more information, +# refer to the documentation. (string value) +#novncproxy_host = 0.0.0.0 + +# +# Port that the noVNC console proxy should bind to. For more information, refer +# to the documentation. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#novncproxy_port = 6080 + +# +# The authentication schemes to use with the compute node. For more information, +# refer to the documentation. (list value) +#auth_schemes = none + +# The path to the client certificate PEM file (for x509). For more information, +# refer to the documentation. (string value) +#vencrypt_client_key = + +# The path to the client key file (for x509). For more information, refer to the +# documentation. (string value) +#vencrypt_client_cert = + +# The path to the CA certificate PEM file. For more information, refer to the +# documentation. (string value) +#vencrypt_ca_certs = + + +[workarounds] +# +# A collection of workarounds used to mitigate bugs or issues found in system +# tools (e.g. Libvirt or QEMU) or Nova itself under certain conditions. These +# should only be enabled in exceptional circumstances. All options are linked +# against bug IDs, where more information on the issue can be found. + +# +# From nova.conf +# + +# +# Use sudo instead of rootwrap. For more information, refer to the +# documentation. (boolean value) +#disable_rootwrap = false + +# DEPRECATED: +# Disable live snapshots when using the libvirt driver. For more information, +# refer to the documentation. (boolean value) +# This option is deprecated for removal since 19.0.0. +# Its value may be silently ignored in the future. +# Reason: +# This option was added to work around issues with libvirt 1.2.2. We no longer +# support this version of libvirt, which means this workaround is no longer +# necessary. It will be removed in a future release. +#disable_libvirt_livesnapshot = false + +# +# Enable handling of events emitted from compute drivers. For more information, +# refer to the documentation. (boolean value) +#handle_virt_lifecycle_events = true + +# +# Disable the server group policy check upcall in compute. For more information, +# refer to the documentation. (boolean value) +#disable_group_policy_check_upcall = false + +# DEPRECATED: +# Enable live migration of instances with NUMA topologies. For more information, +# refer to the documentation. (boolean value) +# This option is deprecated for removal since 20.0.0. +# Its value may be silently ignored in the future. +# Reason: This option was added to mitigate known issues +# when live migrating instances with a NUMA topology with the libvirt driver. +# Those issues are resolved in Train. Clouds using the libvirt driver and fully +# upgraded to Train support NUMA-aware live migration. This option will be +# removed in a future release. 
+#enable_numa_live_migration = false + +# +# Ensure the instance directory is removed during clean up when using rbd. For +# more information, refer to the documentation. (boolean value) +#ensure_libvirt_rbd_instance_dir_cleanup = false + +# DEPRECATED: +# Disable fallback request for VCPU allocations when using pinned instances. For +# more information, refer to the documentation. (boolean value) +# This option is deprecated for removal since 20.0.0. +# Its value may be silently ignored in the future. +#disable_fallback_pcpu_query = false + +# +# When booting from an image on a ceph-backed compute node, if the image does +# not +# already reside on the ceph cluster (as would be the case if glance is +# also using the same cluster), nova will download the image from glance and +# upload it to ceph itself. If using multiple ceph clusters, this may cause nova +# to unintentionally duplicate the image in a non-COW-able way in the local +# ceph deployment, wasting space. For more information, refer to the +# documentation. (boolean value) +#never_download_image_if_on_rbd = false + +# DEPRECATED: +# When attaching encrypted LUKSv1 Cinder volumes to instances the Libvirt driver +# configures the encrypted disks to be natively decrypted by QEMU. For more +# information, refer to the documentation. (boolean value) +# This option is deprecated for removal since 23.0.0. +# Its value may be silently ignored in the future. +# Reason: +# The underlying performance regression within libgcrypt that prompted this +# workaround has been resolved as of 1.8.5 +#disable_native_luksv1 = false + +# DEPRECATED: +# Attach RBD Cinder volumes to the compute as host block devices. For more +# information, refer to the documentation. (boolean value) +# This option is deprecated for removal since 23.0.0. +# Its value may be silently ignored in the future. +# Reason: +# The underlying performance regression within libgcrypt that prompted this +# workaround has been resolved as of 1.8.5 +#rbd_volume_local_attach = false + +# +# If it is set to True then the libvirt driver will reserve DISK_GB resource for +# the images stored in the image cache. If the +# :oslo.config:option:`DEFAULT.instances_path` is on different disk partition +# than the image cache directory then the driver will not reserve resource for +# the cache. For more information, refer to the documentation. (boolean value) +#reserve_disk_resource_for_image_cache = false + + +[wsgi] +# +# Options under this group are used to configure WSGI (Web Server Gateway +# Interface). WSGI is used to serve API requests. + +# +# From nova.conf +# + +# +# This option represents a file name for the paste.deploy config for nova-api. +# For more information, refer to the documentation. (string value) +#api_paste_config = api-paste.ini + +# DEPRECATED: +# It represents a python format string that is used as the template to generate +# log lines. The following values can be formatted into it: client_ip, +# date_time, request_line, status_code, body_length, wall_seconds. For more +# information, refer to the documentation. (string value) +# This option is deprecated for removal since 16.0.0. +# Its value may be silently ignored in the future. +# Reason: +# This option only works when running nova-api under eventlet, and +# encodes very eventlet specific pieces of information. Starting in Pike +# the preferred model for running nova-api is under uwsgi or apache +# mod_wsgi. 
+#wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f + +# +# This option specifies the HTTP header used to determine the protocol scheme +# for the original request, even if it was removed by a SSL terminating proxy. +# For more information, refer to the documentation. (string value) +#secure_proxy_ssl_header = + +# +# This option allows setting path to the CA certificate file that should be used +# to verify connecting clients. For more information, refer to the +# documentation. (string value) +#ssl_ca_file = + +# +# This option allows setting path to the SSL certificate of API server. For more +# information, refer to the documentation. (string value) +#ssl_cert_file = + +# +# This option specifies the path to the file where SSL private key of API +# server is stored when SSL is in effect. For more information, refer to the +# documentation. (string value) +#ssl_key_file = + +# +# This option sets the value of TCP_KEEPIDLE in seconds for each server socket. +# It specifies the duration of time to keep connection active. TCP generates a +# KEEPALIVE transmission for an application that requests to keep connection +# active. Not supported on OS X. For more information, refer to the +# documentation. (integer value) +# Minimum value: 0 +#tcp_keepidle = 600 + +# +# This option specifies the size of the pool of greenthreads used by wsgi. +# It is possible to limit the number of concurrent connections using this +# option. +# (integer value) +# Minimum value: 0 +# Deprecated group/name - [DEFAULT]/wsgi_default_pool_size +#default_pool_size = 1000 + +# +# This option specifies the maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large tokens (typically +# those generated by the Keystone v3 API with big service catalogs). For more +# information, refer to the documentation. (integer value) +# Minimum value: 0 +#max_header_line = 16384 + +# +# This option allows using the same TCP connection to send and receive multiple +# HTTP requests/responses, as opposed to opening a new one for every single +# request/response pair. HTTP keep-alive indicates HTTP connection reuse. For +# more information, refer to the documentation. (boolean value) +# Deprecated group/name - [DEFAULT]/wsgi_keep_alive +#keep_alive = true + +# +# This option specifies the timeout for client connections' socket operations. +# If an incoming connection is idle for this number of seconds it will be +# closed. It indicates timeout on individual read/writes on the socket +# connection. To wait forever set to 0. +# (integer value) +# Minimum value: 0 +#client_socket_timeout = 900 + + +[zvm] +# +# zvm options allows cloud administrator to configure related +# z/VM hypervisor driver to be used within an OpenStack deployment. +# +# zVM options are used when the compute_driver is set to use +# zVM (compute_driver=zvm.ZVMDriver) + +# +# From nova.conf +# + +# +# URL to be used to communicate with z/VM Cloud Connector. +# (uri value) +# +# This option has a sample default set, which means that +# its actual default value may vary from the one documented +# below. +#cloud_connector_url = http://zvm.example.org:8080/ + +# +# CA certificate file to be verified in httpd server with TLS enabled. For more +# information, refer to the documentation. (string value) +#ca_file = + +# +# The path at which images will be stored (snapshot, deploy, etc). For more +# information, refer to the documentation. 
(string value) +# +# This option has a sample default set, which means that +# its actual default value may vary from the one documented +# below. +#image_tmp_path = $state_path/images + +# +# Timeout (seconds) to wait for an instance to start. For more information, +# refer to the documentation. (integer value) +#reachable_timeout = 300 + + +[cells] +enable = False + + +[os_region_name] +openstack = diff --git a/charms/nova-k8s/src/templates/parts/section-database b/charms/nova-k8s/src/templates/parts/section-database new file mode 100644 index 00000000..e9062346 --- /dev/null +++ b/charms/nova-k8s/src/templates/parts/section-database @@ -0,0 +1,7 @@ +[database] +{% if shared_db.database_host -%} +connection = {{ shared_db.database_type }}://{{ shared_db.database_user }}:{{ shared_db.database_password }}@{{ shared_db.database_host }}/{{ shared_db.database }}{% if shared_db.database_ssl_ca %}?ssl_ca={{ shared_db.database_ssl_ca }}{% if shared_db.database_ssl_cert %}&ssl_cert={{ shared_db.database_ssl_cert }}&ssl_key={{ shared_db.database_ssl_key }}{% endif %}{% endif %} +{% else -%} +connection = sqlite:////var/lib/cinder/cinder.db +{% endif -%} +connection_recycle_time = 200 diff --git a/charms/nova-k8s/src/templates/parts/section-federation b/charms/nova-k8s/src/templates/parts/section-federation new file mode 100644 index 00000000..65ee99ed --- /dev/null +++ b/charms/nova-k8s/src/templates/parts/section-federation @@ -0,0 +1,10 @@ +{% if trusted_dashboards %} +[federation] +{% for dashboard_url in trusted_dashboards -%} +trusted_dashboard = {{ dashboard_url }} +{% endfor -%} +{% endif %} +{% for sp in fid_sps -%} +[{{ sp['protocol-name'] }}] +remote_id_attribute = {{ sp['remote-id-attribute'] }} +{% endfor -%} diff --git a/charms/nova-k8s/src/templates/parts/section-identity b/charms/nova-k8s/src/templates/parts/section-identity new file mode 100644 index 00000000..cda360f7 --- /dev/null +++ b/charms/nova-k8s/src/templates/parts/section-identity @@ -0,0 +1,11 @@ +[keystone_authtoken] +{% if identity_service.internal_host -%} +www_authenticate_uri = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }} +auth_url = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }} +auth_type = password +project_domain_name = {{ identity_service.service_domain_name }} +user_domain_name = {{ identity_service.service_domain_name }} +project_name = {{ identity_service.service_project_name }} +username = {{ identity_service.service_user_name }} +password = {{ identity_service.service_password }} +{% endif -%} diff --git a/charms/nova-k8s/src/templates/parts/section-middleware b/charms/nova-k8s/src/templates/parts/section-middleware new file mode 100644 index 00000000..e65f1d98 --- /dev/null +++ b/charms/nova-k8s/src/templates/parts/section-middleware @@ -0,0 +1,6 @@ +{% for section in sections -%} +[{{section}}] +{% for key, value in sections[section].items() -%} +{{ key }} = {{ value }} +{% endfor %} +{%- endfor %} diff --git a/charms/nova-k8s/src/templates/parts/section-signing b/charms/nova-k8s/src/templates/parts/section-signing new file mode 100644 index 00000000..cb7d69ae --- /dev/null +++ b/charms/nova-k8s/src/templates/parts/section-signing @@ -0,0 +1,15 @@ +{% if enable_signing -%} +[signing] +{% if certfile -%} +certfile = {{ certfile }} +{% endif -%} +{% if keyfile -%} +keyfile = {{ keyfile }} +{% endif -%} +{% if ca_certs -%} +ca_certs = {{ ca_certs }} +{% endif -%} +{% 
if ca_key -%}
+ca_key = {{ ca_key }}
+{% endif -%}
+{% endif -%}
\ No newline at end of file
diff --git a/charms/nova-k8s/src/templates/wsgi-nova-api.conf.j2 b/charms/nova-k8s/src/templates/wsgi-nova-api.conf.j2
new file mode 100644
index 00000000..fe21298f
--- /dev/null
+++ b/charms/nova-k8s/src/templates/wsgi-nova-api.conf.j2
@@ -0,0 +1,48 @@
+Listen {{ wsgi_config.public_port }}
+Listen {{ wsgi_nova_api.public_port }}
+<VirtualHost *:{{ wsgi_config.public_port }}>
+    WSGIDaemonProcess nova-api processes=3 threads=1 user={{ wsgi_config.user }} group={{ wsgi_config.group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup nova-api
+    WSGIScriptAlias / {{ wsgi_config.wsgi_public_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog {{ wsgi_config.error_log }}
+    CustomLog {{ wsgi_config.custom_log }} combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
+<VirtualHost *:{{ wsgi_nova_api.public_port }}>
+    WSGIDaemonProcess nova-metadata processes=3 threads=1 user={{ wsgi_nova_api.user }} group={{ wsgi_nova_api.group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup nova-metadata
+    WSGIScriptAlias / {{ wsgi_nova_api.wsgi_public_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog {{ wsgi_nova_api.error_log }}
+    CustomLog {{ wsgi_nova_api.custom_log }} combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
diff --git a/charms/nova-k8s/test-requirements.txt b/charms/nova-k8s/test-requirements.txt
new file mode 100644
index 00000000..8057d2c6
--- /dev/null
+++ b/charms/nova-k8s/test-requirements.txt
@@ -0,0 +1,17 @@
+# This file is managed centrally. If you find the need to modify this as a
+# one-off, please don't. Instead, consult #openstack-charms and ask about
+# requirements management in charms via bot-control. Thank you.
+charm-tools>=2.4.4
+coverage>=3.6
+mock>=1.2
+flake8>=2.2.4,<=2.4.1
+pyflakes==2.1.1
+stestr>=2.2.0
+requests>=2.18.4
+psutil
+# oslo.i18n dropped py35 support
+oslo.i18n<4.0.0
+git+https://github.com/openstack-charmers/zaza.git#egg=zaza
+git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
+pytz # workaround for 14.04 pip/tox
+pyudev # for ceph-* charm unit tests (not mocked?)
diff --git a/charms/nova-k8s/tests/__init__.py b/charms/nova-k8s/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/charms/nova-k8s/tests/test_charm.py b/charms/nova-k8s/tests/test_charm.py
new file mode 100644
index 00000000..e1ccaa76
--- /dev/null
+++ b/charms/nova-k8s/tests/test_charm.py
@@ -0,0 +1,66 @@
+# Copyright 2022 liam
+# See LICENSE file for licensing details.
+#
+# Learn more about testing at: https://juju.is/docs/sdk/testing
+
+import unittest
+from unittest.mock import Mock
+
+from charm import SunbeamNovaOperatorCharm
+from ops.model import ActiveStatus
+from ops.testing import Harness
+
+
+class TestCharm(unittest.TestCase):
+    def setUp(self):
+        self.harness = Harness(SunbeamNovaOperatorCharm)
+        self.addCleanup(self.harness.cleanup)
+        self.harness.begin()
+
+    def test_config_changed(self):
+        self.assertEqual(list(self.harness.charm._stored.things), [])
+        self.harness.update_config({"thing": "foo"})
+        self.assertEqual(list(self.harness.charm._stored.things), ["foo"])
+
+    def test_action(self):
+        # the harness doesn't (yet!)
help much with actions themselves + action_event = Mock(params={"fail": ""}) + self.harness.charm._on_fortune_action(action_event) + + self.assertTrue(action_event.set_results.called) + + def test_action_fail(self): + action_event = Mock(params={"fail": "fail this"}) + self.harness.charm._on_fortune_action(action_event) + + self.assertEqual(action_event.fail.call_args, [("fail this",)]) + + def test_httpbin_pebble_ready(self): + # Check the initial Pebble plan is empty + initial_plan = self.harness.get_container_pebble_plan("httpbin") + self.assertEqual(initial_plan.to_yaml(), "{}\n") + # Expected plan after Pebble ready with default config + expected_plan = { + "services": { + "httpbin": { + "override": "replace", + "summary": "httpbin", + "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", + "startup": "enabled", + "environment": {"thing": "🎁"}, + } + }, + } + # Get the httpbin container from the model + container = self.harness.model.unit.get_container("httpbin") + # Emit the PebbleReadyEvent carrying the httpbin container + self.harness.charm.on.httpbin_pebble_ready.emit(container) + # Get the plan now we've run PebbleReady + updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() + # Check we've got the plan we expected + self.assertEqual(expected_plan, updated_plan) + # Check the service was started + service = self.harness.model.unit.get_container("httpbin").get_service("httpbin") + self.assertTrue(service.is_running()) + # Ensure we set an ActiveStatus with no message + self.assertEqual(self.harness.model.unit.status, ActiveStatus()) diff --git a/charms/nova-k8s/tox.ini b/charms/nova-k8s/tox.ini new file mode 100644 index 00000000..31301b80 --- /dev/null +++ b/charms/nova-k8s/tox.ini @@ -0,0 +1,134 @@ +# Operator charm (with zaza): tox.ini + +[tox] +envlist = pep8,py3 +skipsdist = True +# NOTE: Avoid build/test env pollution by not enabling sitepackages. +sitepackages = False +# NOTE: Avoid false positives by not skipping missing interpreters. +skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = pip < 20.3 + virtualenv < 20.0 +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.2.0 + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + CHARM_DIR={envdir} +install_command = + pip install {opts} {packages} +commands = stestr run --slowest {posargs} +whitelist_externals = + git + add-to-archive.py + bash + charmcraft +passenv = HOME TERM CS_* OS_* TEST_* +deps = -r{toxinidir}/test-requirements.txt + +[testenv:py35] +basepython = python3.5 +# python3.5 is irrelevant on a focal+ charm. 
+commands = /bin/true + +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:pep8] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} src unit_tests tests + +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run --slowest {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . +omit = + .tox/* + */charmhelpers/* + unit_tests/* + +[testenv:venv] +basepython = python3 +commands = {posargs} + +[testenv:build] +basepython = python3 +deps = -r{toxinidir}/build-requirements.txt +commands = + charmcraft build + +[testenv:func-noop] +basepython = python3 +commands = + functest-run-suite --help + +[testenv:func] +basepython = python3 +commands = + functest-run-suite --keep-model + +[testenv:func-smoke] +basepython = python3 +commands = + functest-run-suite --keep-model --smoke + +[testenv:func-dev] +basepython = python3 +commands = + functest-run-suite --keep-model --dev + +[testenv:func-target] +basepython = python3 +commands = + functest-run-suite --keep-model --bundle {posargs} + +[flake8] +# Ignore E902 because the unit_tests directory is missing in the built charm. +ignore = E402,E226,E902
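The Jinja2 fragments added under src/templates/parts are rendered by the charm with a context built from relation data. As a minimal sketch (not part of the commit), the snippet below renders the section-database fragment with an invented shared_db dict to show the connection URL that would land in nova.conf; the driver string, host and credentials are illustrative assumptions, and the nested SSL query parameters handled by the real template are omitted for brevity.

    from jinja2 import Template

    # Fragment copied from section-database, with the SSL conditionals trimmed.
    FRAGMENT = """\
    [database]
    {% if shared_db.database_host -%}
    connection = {{ shared_db.database_type }}://{{ shared_db.database_user }}:{{ shared_db.database_password }}@{{ shared_db.database_host }}/{{ shared_db.database }}
    {% else -%}
    connection = sqlite:////var/lib/cinder/cinder.db
    {% endif -%}
    connection_recycle_time = 200
    """

    # Invented relation data for illustration only; Jinja2 resolves
    # shared_db.database_host via item lookup, so a plain dict is enough.
    shared_db = {
        "database_type": "mysql+pymysql",
        "database_user": "nova",
        "database_password": "s3cret",
        "database_host": "10.20.0.5",
        "database": "nova",
    }

    print(Template(FRAGMENT).render(shared_db=shared_db))
    # Expected output:
    # [database]
    # connection = mysql+pymysql://nova:s3cret@10.20.0.5/nova
    # connection_recycle_time = 200

The else branch in the real template keeps the service configurable before the shared-db relation is joined by falling back to a local sqlite URL (still pointing at cinder.db in this initial cut).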