commit a770ed9a24856793291d76a46c39e3fed29610d1 Author: Felipe Reyes Date: Mon May 1 16:40:01 2023 +0200 Initial import diff --git a/charms/heat-k8s/.gitignore b/charms/heat-k8s/.gitignore new file mode 100644 index 00000000..a26d707f --- /dev/null +++ b/charms/heat-k8s/.gitignore @@ -0,0 +1,9 @@ +venv/ +build/ +*.charm +.tox/ +.coverage +__pycache__/ +*.py[cod] +.idea +.vscode/ diff --git a/charms/heat-k8s/CONTRIBUTING.md b/charms/heat-k8s/CONTRIBUTING.md new file mode 100644 index 00000000..2969183e --- /dev/null +++ b/charms/heat-k8s/CONTRIBUTING.md @@ -0,0 +1,33 @@ +# Contributing + +To make contributions to this charm, you'll need a working [development setup](https://juju.is/docs/sdk/dev-setup). + +You can use the environments created by `tox` for development: + +```shell +tox --notest -e unit +source .tox/unit/bin/activate +``` + +## Testing + +This project uses `tox` for managing test environments. There are some pre-configured environments +that can be used for linting and formatting code when you're preparing contributions to the charm: + +```shell +tox -e fmt # update your code according to linting rules +tox -e lint # code style +tox -e unit # unit tests +tox -e integration # integration tests +tox # runs 'lint' and 'unit' environments +``` + +## Build the charm + +Build the charm in this git repository using: + +```shell +charmcraft pack +``` + + + +# heat-k8s + +Charmhub package name: operator-template +More information: https://charmhub.io/heat-k8s + +Describe your charm in one or two sentences. + +## Other resources + + + +- [Read more](https://example.com) + +- [Contributing](CONTRIBUTING.md) + +- See the [Juju SDK documentation](https://juju.is/docs/sdk) for more information about developing and improving charms. diff --git a/charms/heat-k8s/actions.yaml b/charms/heat-k8s/actions.yaml new file mode 100644 index 00000000..88e6195d --- /dev/null +++ b/charms/heat-k8s/actions.yaml @@ -0,0 +1,2 @@ +# NOTE: no actions yet! +{ } diff --git a/charms/heat-k8s/charmcraft.yaml b/charms/heat-k8s/charmcraft.yaml new file mode 100644 index 00000000..5b0da209 --- /dev/null +++ b/charms/heat-k8s/charmcraft.yaml @@ -0,0 +1,30 @@ +type: "charm" +bases: + - build-on: + - name: "ubuntu" + channel: "20.04" + run-on: + - name: "ubuntu" + channel: "20.04" +parts: + update-certificates: + plugin: nil + override-build: | + apt update + apt install -y ca-certificates + update-ca-certificates + + charm: + after: [update-certificates] + build-packages: + - git + - libffi-dev + - libssl-dev + - rustc + - cargo + - pkg-config + charm-binary-python-packages: + - cryptography + - jsonschema + - jinja2 + - git+https://opendev.org/openstack/charm-ops-sunbeam#egg=ops_sunbeam diff --git a/charms/heat-k8s/config.yaml b/charms/heat-k8s/config.yaml new file mode 100644 index 00000000..606b5357 --- /dev/null +++ b/charms/heat-k8s/config.yaml @@ -0,0 +1,27 @@ +options: + debug: + default: False + description: Enable debug logging. + type: boolean + os-admin-hostname: + default: glance.juju + description: | + The hostname or address of the admin endpoints that should be advertised + in the glance image provider. + type: string + os-internal-hostname: + default: glance.juju + description: | + The hostname or address of the internal endpoints that should be advertised + in the glance image provider. + type: string + os-public-hostname: + default: glance.juju + description: | + The hostname or address of the internal endpoints that should be advertised + in the glance image provider. 
+ type: string + region: + default: RegionOne + description: Space delimited list of OpenStack regions + type: string diff --git a/charms/heat-k8s/lib/charms/data_platform_libs/v0/database_requires.py b/charms/heat-k8s/lib/charms/data_platform_libs/v0/database_requires.py new file mode 100644 index 00000000..6f425e71 --- /dev/null +++ b/charms/heat-k8s/lib/charms/data_platform_libs/v0/database_requires.py @@ -0,0 +1,502 @@ +# Copyright 2022 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""[DEPRECATED] Relation 'requires' side abstraction for database relation. + +This library is a uniform interface to a selection of common database +metadata, with added custom events that add convenience to database management, +and methods to consume the application related data. + +Following an example of using the DatabaseCreatedEvent, in the context of the +application charm code: + +```python + +from charms.data_platform_libs.v0.database_requires import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Charm events defined in the database requires charm library. + self.database = DatabaseRequires(self, relation_name="database", database_name="database") + self.framework.observe(self.database.on.database_created, self._on_database_created) + + def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + + # Start application with rendered configuration + self._start_application(config_file) + + # Set active status + self.unit.status = ActiveStatus("received database credentials") +``` + +As shown above, the library provides some custom events to handle specific situations, +which are listed below: + +— database_created: event emitted when the requested database is created. +— endpoints_changed: event emitted when the read/write endpoints of the database have changed. +— read_only_endpoints_changed: event emitted when the read-only endpoints of the database + have changed. Event is not triggered if read/write endpoints changed too. + +If it is needed to connect multiple database clusters to the same relation endpoint +the application charm can implement the same code as if it would connect to only +one database cluster (like the above code example). + +To differentiate multiple clusters connected to the same relation endpoint +the application charm can use the name of the remote application: + +```python + +def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Get the remote app name of the cluster that triggered this event + cluster = event.relation.app.name +``` + +It is also possible to provide an alias for each different database cluster/relation. + +So, it is possible to differentiate the clusters in two ways. 
+The first is to use the remote application name, i.e., `event.relation.app.name`, as above. + +The second way is to use different event handlers to handle each cluster events. +The implementation would be something like the following code: + +```python + +from charms.data_platform_libs.v0.database_requires import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Define the cluster aliases and one handler for each cluster database created event. + self.database = DatabaseRequires( + self, + relation_name="database", + database_name="database", + relations_aliases = ["cluster1", "cluster2"], + ) + self.framework.observe( + self.database.on.cluster1_database_created, self._on_cluster1_database_created + ) + self.framework.observe( + self.database.on.cluster2_database_created, self._on_cluster2_database_created + ) + + def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster1 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + + def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster2 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + +``` +""" + +import json +import logging +from collections import namedtuple +from datetime import datetime +from typing import List, Optional + +from ops.charm import ( + CharmEvents, + RelationChangedEvent, + RelationEvent, + RelationJoinedEvent, +) +from ops.framework import EventSource, Object +from ops.model import Relation + +# The unique Charmhub library identifier, never change it +LIBID = "0241e088ffa9440fb4e3126349b2fb62" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version. +LIBPATCH = 5 + +logger = logging.getLogger(__name__) + + +class DatabaseEvent(RelationEvent): + """Base class for database events.""" + + @property + def endpoints(self) -> Optional[str]: + """Returns a comma separated list of read/write endpoints.""" + return self.relation.data[self.relation.app].get("endpoints") + + @property + def password(self) -> Optional[str]: + """Returns the password for the created user.""" + return self.relation.data[self.relation.app].get("password") + + @property + def read_only_endpoints(self) -> Optional[str]: + """Returns a comma separated list of read only endpoints.""" + return self.relation.data[self.relation.app].get("read-only-endpoints") + + @property + def replset(self) -> Optional[str]: + """Returns the replicaset name. + + MongoDB only. + """ + return self.relation.data[self.relation.app].get("replset") + + @property + def tls(self) -> Optional[str]: + """Returns whether TLS is configured.""" + return self.relation.data[self.relation.app].get("tls") + + @property + def tls_ca(self) -> Optional[str]: + """Returns TLS CA.""" + return self.relation.data[self.relation.app].get("tls-ca") + + @property + def uris(self) -> Optional[str]: + """Returns the connection URIs. + + MongoDB, Redis, OpenSearch and Kafka only. 
+ """ + return self.relation.data[self.relation.app].get("uris") + + @property + def username(self) -> Optional[str]: + """Returns the created username.""" + return self.relation.data[self.relation.app].get("username") + + @property + def version(self) -> Optional[str]: + """Returns the version of the database. + + Version as informed by the database daemon. + """ + return self.relation.data[self.relation.app].get("version") + + +class DatabaseCreatedEvent(DatabaseEvent): + """Event emitted when a new database is created for use on this relation.""" + + +class DatabaseEndpointsChangedEvent(DatabaseEvent): + """Event emitted when the read/write endpoints are changed.""" + + +class DatabaseReadOnlyEndpointsChangedEvent(DatabaseEvent): + """Event emitted when the read only endpoints are changed.""" + + +class DatabaseEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. + """ + + database_created = EventSource(DatabaseCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent) + + +Diff = namedtuple("Diff", "added changed deleted") +Diff.__doc__ = """ +A tuple for storing the diff between two data mappings. + +— added — keys that were added. +— changed — keys that still exist but have new values. +— deleted — keys that were deleted. +""" + + +class DatabaseRequires(Object): + """Requires-side of the database relation.""" + + on = DatabaseEvents() + + def __init__( + self, + charm, + relation_name: str, + database_name: str, + extra_user_roles: str = None, + relations_aliases: List[str] = None, + ): + """Manager of database client relations.""" + super().__init__(charm, relation_name) + self.charm = charm + self.database = database_name + self.extra_user_roles = extra_user_roles + self.local_app = self.charm.model.app + self.local_unit = self.charm.unit + self.relation_name = relation_name + self.relations_aliases = relations_aliases + self.framework.observe( + self.charm.on[relation_name].relation_joined, self._on_relation_joined_event + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, self._on_relation_changed_event + ) + + # Define custom event names for each alias. + if relations_aliases: + # Ensure the number of aliases does not exceed the maximum + # of connections allowed in the specific relation. + relation_connection_limit = self.charm.meta.requires[relation_name].limit + if len(relations_aliases) != relation_connection_limit: + raise ValueError( + f"The number of aliases must match the maximum number of connections allowed in the relation. " + f"Expected {relation_connection_limit}, got {len(relations_aliases)}" + ) + + for relation_alias in relations_aliases: + self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent) + self.on.define_event( + f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent + ) + self.on.define_event( + f"{relation_alias}_read_only_endpoints_changed", + DatabaseReadOnlyEndpointsChangedEvent, + ) + + def _assign_relation_alias(self, relation_id: int) -> None: + """Assigns an alias to a relation. + + This function writes in the unit data bag. + + Args: + relation_id: the identifier for a particular relation. + """ + # If no aliases were provided, return immediately. + if not self.relations_aliases: + return + + # Return if an alias was already assigned to this relation + # (like when there are more than one unit joining the relation). 
+ if ( + self.charm.model.get_relation(self.relation_name, relation_id) + .data[self.local_unit] + .get("alias") + ): + return + + # Retrieve the available aliases (the ones that weren't assigned to any relation). + available_aliases = self.relations_aliases[:] + for relation in self.charm.model.relations[self.relation_name]: + alias = relation.data[self.local_unit].get("alias") + if alias: + logger.debug("Alias %s was already assigned to relation %d", alias, relation.id) + available_aliases.remove(alias) + + # Set the alias in the unit relation databag of the specific relation. + relation = self.charm.model.get_relation(self.relation_name, relation_id) + relation.data[self.local_unit].update({"alias": available_aliases[0]}) + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + # Retrieve the old data from the data key in the local unit relation databag. + old_data = json.loads(event.relation.data[self.local_unit].get("data", "{}")) + # Retrieve the new data from the event relation databag. + new_data = { + key: value for key, value in event.relation.data[event.app].items() if key != "data" + } + + # These are the keys that were added to the databag and triggered this event. + added = new_data.keys() - old_data.keys() + # These are the keys that were removed from the databag and triggered this event. + deleted = old_data.keys() - new_data.keys() + # These are the keys that already existed in the databag, + # but had their values changed. + changed = { + key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key] + } + + # TODO: evaluate the possibility of losing the diff if some error + # happens in the charm before the diff is completely checked (DPE-412). + # Convert the new_data to a serializable format and save it for a next diff check. + event.relation.data[self.local_unit].update({"data": json.dumps(new_data)}) + + # Return the diff with all possible changes. + return Diff(added, changed, deleted) + + def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None: + """Emit an aliased event to a particular relation if it has an alias. + + Args: + event: the relation changed event that was received. + event_name: the name of the event to emit. + """ + alias = self._get_relation_alias(event.relation.id) + if alias: + getattr(self.on, f"{alias}_{event_name}").emit( + event.relation, app=event.app, unit=event.unit + ) + + def _get_relation_alias(self, relation_id: int) -> Optional[str]: + """Returns the relation alias. + + Args: + relation_id: the identifier for a particular relation. + + Returns: + the relation alias or None if the relation was not found. + """ + for relation in self.charm.model.relations[self.relation_name]: + if relation.id == relation_id: + return relation.data[self.local_unit].get("alias") + return None + + def fetch_relation_data(self) -> dict: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation ID). 
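+
+            Illustrative shape of the returned value (the keys depend on what
+            the database charm actually published), for example:
+            {0: {"username": "...", "password": "...", "endpoints": "host:port"}}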
+ """ + data = {} + for relation in self.relations: + data[relation.id] = { + key: value for key, value in relation.data[relation.app].items() if key != "data" + } + return data + + def _update_relation_data(self, relation_id: int, data: dict) -> None: + """Updates a set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + data: dict containing the key-value pairs + that should be updated in the relation. + """ + if self.local_unit.is_leader(): + relation = self.charm.model.get_relation(self.relation_name, relation_id) + relation.data[self.local_app].update(data) + + def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None: + """Event emitted when the application joins the database relation.""" + # If relations aliases were provided, assign one to the relation. + self._assign_relation_alias(event.relation.id) + + # Sets both database and extra user roles in the relation + # if the roles are provided. Otherwise, sets only the database. + if self.extra_user_roles: + self._update_relation_data( + event.relation.id, + { + "database": self.database, + "extra-user-roles": self.extra_user_roles, + }, + ) + else: + self._update_relation_data(event.relation.id, {"database": self.database}) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the database relation has changed.""" + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Check if the database is created + # (the database charm shared the credentials). + if "username" in diff.added and "password" in diff.added: + # Emit the default event (the one without an alias). + logger.info("database created at %s", datetime.now()) + self.on.database_created.emit(event.relation, app=event.app, unit=event.unit) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "database_created") + + # To avoid unnecessary application restarts do not trigger + # “endpoints_changed“ event if “database_created“ is triggered. + return + + # Emit an endpoints changed event if the database + # added or changed this info in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("endpoints changed on %s", datetime.now()) + self.on.endpoints_changed.emit(event.relation, app=event.app, unit=event.unit) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "endpoints_changed") + + # To avoid unnecessary application restarts do not trigger + # “read_only_endpoints_changed“ event if “endpoints_changed“ is triggered. + return + + # Emit a read only endpoints changed event if the database + # added or changed this info in the relation databag. + if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("read-only-endpoints changed on %s", datetime.now()) + self.on.read_only_endpoints_changed.emit( + event.relation, app=event.app, unit=event.unit + ) + + # Emit the aliased event (if any). 
+ self._emit_aliased_event(event, "read_only_endpoints_changed") + + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return list(self.charm.model.relations[self.relation_name]) diff --git a/charms/heat-k8s/lib/charms/keystone_k8s/v1/identity_service.py b/charms/heat-k8s/lib/charms/keystone_k8s/v1/identity_service.py new file mode 100644 index 00000000..35556622 --- /dev/null +++ b/charms/heat-k8s/lib/charms/keystone_k8s/v1/identity_service.py @@ -0,0 +1,518 @@ +"""IdentityServiceProvides and Requires module. + + +This library contains the Requires and Provides classes for handling +the identity_service interface. + +Import `IdentityServiceRequires` in your charm, with the charm object and the +relation name: + - self + - "identity_service" + +Also provide additional parameters to the charm object: + - service + - internal_url + - public_url + - admin_url + - region + - username + - vhost + +Two events are also available to respond to: + - connected + - ready + - goneaway + +A basic example showing the usage of this relation follows: + +``` +from charms.keystone_k8s.v1.identity_service import IdentityServiceRequires + +class IdentityServiceClientCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + # IdentityService Requires + self.identity_service = IdentityServiceRequires( + self, "identity_service", + service = "my-service" + internal_url = "http://internal-url" + public_url = "http://public-url" + admin_url = "http://admin-url" + region = "region" + ) + self.framework.observe( + self.identity_service.on.connected, self._on_identity_service_connected) + self.framework.observe( + self.identity_service.on.ready, self._on_identity_service_ready) + self.framework.observe( + self.identity_service.on.goneaway, self._on_identity_service_goneaway) + + def _on_identity_service_connected(self, event): + '''React to the IdentityService connected event. + + This event happens when n IdentityService relation is added to the + model before credentials etc have been provided. + ''' + # Do something before the relation is complete + pass + + def _on_identity_service_ready(self, event): + '''React to the IdentityService ready event. + + The IdentityService interface will use the provided config for the + request to the identity server. + ''' + # IdentityService Relation is ready. Do something with the completed relation. + pass + + def _on_identity_service_goneaway(self, event): + '''React to the IdentityService goneaway event. + + This event happens when an IdentityService relation is removed. + ''' + # IdentityService Relation has goneaway. 
shutdown services or suchlike + pass +``` +""" + +import json +import logging + +from ops.framework import ( + StoredState, + EventBase, + ObjectEvents, + EventSource, + Object, +) +from ops.model import ( + Relation, + SecretNotFoundError, +) + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "0fa7fe7236c14c6e9624acf232b9a3b0" + +# Increment this major API version when introducing breaking changes +LIBAPI = 1 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 0 + + +logger = logging.getLogger(__name__) + + +class IdentityServiceConnectedEvent(EventBase): + """IdentityService connected Event.""" + + pass + + +class IdentityServiceReadyEvent(EventBase): + """IdentityService ready for use Event.""" + + pass + + +class IdentityServiceGoneAwayEvent(EventBase): + """IdentityService relation has gone-away Event""" + + pass + + +class IdentityServiceServerEvents(ObjectEvents): + """Events class for `on`""" + + connected = EventSource(IdentityServiceConnectedEvent) + ready = EventSource(IdentityServiceReadyEvent) + goneaway = EventSource(IdentityServiceGoneAwayEvent) + + +class IdentityServiceRequires(Object): + """ + IdentityServiceRequires class + """ + + on = IdentityServiceServerEvents() + _stored = StoredState() + + def __init__(self, charm, relation_name: str, service_endpoints: dict, + region: str): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.service_endpoints = service_endpoints + self.region = region + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_identity_service_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_identity_service_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_departed, + self._on_identity_service_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_identity_service_relation_broken, + ) + + def _on_identity_service_relation_joined(self, event): + """IdentityService relation joined.""" + logging.debug("IdentityService on_joined") + self.on.connected.emit() + self.register_services( + self.service_endpoints, + self.region) + + def _on_identity_service_relation_changed(self, event): + """IdentityService relation changed.""" + logging.debug("IdentityService on_changed") + try: + self.service_password + self.on.ready.emit() + except (AttributeError, KeyError): + pass + + def _on_identity_service_relation_broken(self, event): + """IdentityService relation broken.""" + logging.debug("IdentityService on_broken") + self.on.goneaway.emit() + + @property + def _identity_service_rel(self) -> Relation: + """The IdentityService relation.""" + return self.framework.model.get_relation(self.relation_name) + + def get_remote_app_data(self, key: str) -> str: + """Return the value for the given key from remote app data.""" + data = self._identity_service_rel.data[self._identity_service_rel.app] + return data.get(key) + + @property + def api_version(self) -> str: + """Return the api_version.""" + return self.get_remote_app_data('api-version') + + @property + def auth_host(self) -> str: + """Return the auth_host.""" + return self.get_remote_app_data('auth-host') + + @property + def auth_port(self) -> str: + """Return the auth_port.""" + return self.get_remote_app_data('auth-port') + + @property + def 
auth_protocol(self) -> str: + """Return the auth_protocol.""" + return self.get_remote_app_data('auth-protocol') + + @property + def internal_host(self) -> str: + """Return the internal_host.""" + return self.get_remote_app_data('internal-host') + + @property + def internal_port(self) -> str: + """Return the internal_port.""" + return self.get_remote_app_data('internal-port') + + @property + def internal_protocol(self) -> str: + """Return the internal_protocol.""" + return self.get_remote_app_data('internal-protocol') + + @property + def admin_domain_name(self) -> str: + """Return the admin_domain_name.""" + return self.get_remote_app_data('admin-domain-name') + + @property + def admin_domain_id(self) -> str: + """Return the admin_domain_id.""" + return self.get_remote_app_data('admin-domain-id') + + @property + def admin_project_name(self) -> str: + """Return the admin_project_name.""" + return self.get_remote_app_data('admin-project-name') + + @property + def admin_project_id(self) -> str: + """Return the admin_project_id.""" + return self.get_remote_app_data('admin-project-id') + + @property + def admin_user_name(self) -> str: + """Return the admin_user_name.""" + return self.get_remote_app_data('admin-user-name') + + @property + def admin_user_id(self) -> str: + """Return the admin_user_id.""" + return self.get_remote_app_data('admin-user-id') + + @property + def service_domain_name(self) -> str: + """Return the service_domain_name.""" + return self.get_remote_app_data('service-domain-name') + + @property + def service_domain_id(self) -> str: + """Return the service_domain_id.""" + return self.get_remote_app_data('service-domain-id') + + @property + def service_host(self) -> str: + """Return the service_host.""" + return self.get_remote_app_data('service-host') + + @property + def service_credentials(self) -> str: + """Return the service_credentials secret.""" + return self.get_remote_app_data('service-credentials') + + @property + def service_password(self) -> str: + """Return the service_password.""" + credentials_id = self.get_remote_app_data('service-credentials') + if not credentials_id: + return None + + try: + credentials = self.charm.model.get_secret(id=credentials_id) + return credentials.get_content().get("password") + except SecretNotFoundError: + logger.warning(f"Secret {credentials_id} not found") + return None + + @property + def service_port(self) -> str: + """Return the service_port.""" + return self.get_remote_app_data('service-port') + + @property + def service_protocol(self) -> str: + """Return the service_protocol.""" + return self.get_remote_app_data('service-protocol') + + @property + def service_project_name(self) -> str: + """Return the service_project_name.""" + return self.get_remote_app_data('service-project-name') + + @property + def service_project_id(self) -> str: + """Return the service_project_id.""" + return self.get_remote_app_data('service-project-id') + + @property + def service_user_name(self) -> str: + """Return the service_user_name.""" + credentials_id = self.get_remote_app_data('service-credentials') + if not credentials_id: + return None + + try: + credentials = self.charm.model.get_secret(id=credentials_id) + return credentials.get_content().get("username") + except SecretNotFoundError: + logger.warning(f"Secret {credentials_id} not found") + return None + + @property + def service_user_id(self) -> str: + """Return the service_user_id.""" + return self.get_remote_app_data('service-user-id') + + @property + def internal_auth_url(self) -> str: + 
"""Return the internal_auth_url.""" + return self.get_remote_app_data('internal-auth-url') + + @property + def admin_auth_url(self) -> str: + """Return the admin_auth_url.""" + return self.get_remote_app_data('admin-auth-url') + + @property + def public_auth_url(self) -> str: + """Return the public_auth_url.""" + return self.get_remote_app_data('public-auth-url') + + def register_services(self, service_endpoints: dict, + region: str) -> None: + """Request access to the IdentityService server.""" + if self.model.unit.is_leader(): + logging.debug("Requesting service registration") + app_data = self._identity_service_rel.data[self.charm.app] + app_data["service-endpoints"] = json.dumps( + service_endpoints, sort_keys=True + ) + app_data["region"] = region + + +class HasIdentityServiceClientsEvent(EventBase): + """Has IdentityServiceClients Event.""" + + pass + + +class ReadyIdentityServiceClientsEvent(EventBase): + """IdentityServiceClients Ready Event.""" + + def __init__(self, handle, relation_id, relation_name, service_endpoints, + region, client_app_name): + super().__init__(handle) + self.relation_id = relation_id + self.relation_name = relation_name + self.service_endpoints = service_endpoints + self.region = region + self.client_app_name = client_app_name + + def snapshot(self): + return { + "relation_id": self.relation_id, + "relation_name": self.relation_name, + "service_endpoints": self.service_endpoints, + "client_app_name": self.client_app_name, + "region": self.region} + + def restore(self, snapshot): + super().restore(snapshot) + self.relation_id = snapshot["relation_id"] + self.relation_name = snapshot["relation_name"] + self.service_endpoints = snapshot["service_endpoints"] + self.region = snapshot["region"] + self.client_app_name = snapshot["client_app_name"] + + +class IdentityServiceClientEvents(ObjectEvents): + """Events class for `on`""" + + has_identity_service_clients = EventSource(HasIdentityServiceClientsEvent) + ready_identity_service_clients = EventSource(ReadyIdentityServiceClientsEvent) + + +class IdentityServiceProvides(Object): + """ + IdentityServiceProvides class + """ + + on = IdentityServiceClientEvents() + _stored = StoredState() + + def __init__(self, charm, relation_name): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_identity_service_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_identity_service_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_identity_service_relation_broken, + ) + + def _on_identity_service_relation_joined(self, event): + """Handle IdentityService joined.""" + logging.debug("IdentityService on_joined") + self.on.has_identity_service_clients.emit() + + def _on_identity_service_relation_changed(self, event): + """Handle IdentityService changed.""" + logging.debug("IdentityService on_changed") + REQUIRED_KEYS = [ + 'service-endpoints', + 'region'] + + values = [ + event.relation.data[event.relation.app].get(k) + for k in REQUIRED_KEYS + ] + # Validate data on the relation + if all(values): + service_eps = json.loads( + event.relation.data[event.relation.app]['service-endpoints']) + self.on.ready_identity_service_clients.emit( + event.relation.id, + event.relation.name, + service_eps, + event.relation.data[event.relation.app]['region'], + event.relation.app.name) + + def 
_on_identity_service_relation_broken(self, event): + """Handle IdentityService broken.""" + logging.debug("IdentityServiceProvides on_departed") + # TODO clear data on the relation + + def set_identity_service_credentials(self, relation_name: int, + relation_id: str, + api_version: str, + auth_host: str, + auth_port: str, + auth_protocol: str, + internal_host: str, + internal_port: str, + internal_protocol: str, + service_host: str, + service_port: str, + service_protocol: str, + admin_domain: str, + admin_project: str, + admin_user: str, + service_domain: str, + service_project: str, + service_user: str, + internal_auth_url: str, + admin_auth_url: str, + public_auth_url: str, + service_credentials: str): + logging.debug("Setting identity_service connection information.") + _identity_service_rel = None + for relation in self.framework.model.relations[relation_name]: + if relation.id == relation_id: + _identity_service_rel = relation + if not _identity_service_rel: + # Relation has disappeared so skip send of data + return + app_data = _identity_service_rel.data[self.charm.app] + app_data["api-version"] = api_version + app_data["auth-host"] = auth_host + app_data["auth-port"] = str(auth_port) + app_data["auth-protocol"] = auth_protocol + app_data["internal-host"] = internal_host + app_data["internal-port"] = str(internal_port) + app_data["internal-protocol"] = internal_protocol + app_data["service-host"] = service_host + app_data["service-port"] = str(service_port) + app_data["service-protocol"] = service_protocol + app_data["admin-domain-name"] = admin_domain.name + app_data["admin-domain-id"] = admin_domain.id + app_data["admin-project-name"] = admin_project.name + app_data["admin-project-id"] = admin_project.id + app_data["admin-user-name"] = admin_user.name + app_data["admin-user-id"] = admin_user.id + app_data["service-domain-name"] = service_domain.name + app_data["service-domain-id"] = service_domain.id + app_data["service-project-name"] = service_project.name + app_data["service-project-id"] = service_project.id + app_data["service-user-id"] = service_user.id + app_data["internal-auth-url"] = internal_auth_url + app_data["admin-auth-url"] = admin_auth_url + app_data["public-auth-url"] = public_auth_url + app_data["service-credentials"] = service_credentials diff --git a/charms/heat-k8s/lib/charms/nginx_ingress_integrator/v0/ingress.py b/charms/heat-k8s/lib/charms/nginx_ingress_integrator/v0/ingress.py new file mode 100644 index 00000000..4cf26164 --- /dev/null +++ b/charms/heat-k8s/lib/charms/nginx_ingress_integrator/v0/ingress.py @@ -0,0 +1,408 @@ +# Copyright 2023 Canonical Ltd. +# Licensed under the Apache2.0, see LICENCE file in charm source for details. +"""Library for the ingress relation. + +This library contains the Requires and Provides classes for handling +the ingress interface. + +Import `IngressRequires` in your charm, with two required options: +- "self" (the charm itself) +- config_dict + +`config_dict` accepts the following keys: +- additional-hostnames +- backend-protocol +- limit-rps +- limit-whitelist +- max-body-size +- owasp-modsecurity-crs +- owasp-modsecurity-custom-rules +- path-routes +- retry-errors +- rewrite-enabled +- rewrite-target +- service-hostname (required) +- service-name (required) +- service-namespace +- service-port (required) +- session-cookie-max-age +- tls-secret-name + +See [the config section](https://charmhub.io/nginx-ingress-integrator/configure) for descriptions +of each, along with the required type. 
+ +As an example, add the following to `src/charm.py`: +``` +from charms.nginx_ingress_integrator.v0.ingress import IngressRequires + +# In your charm's `__init__` method. +self.ingress = IngressRequires(self, { + "service-hostname": self.config["external_hostname"], + "service-name": self.app.name, + "service-port": 80, + } +) + +# In your charm's `config-changed` handler. +self.ingress.update_config({"service-hostname": self.config["external_hostname"]}) +``` +And then add the following to `metadata.yaml`: +``` +requires: + ingress: + interface: ingress +``` +You _must_ register the IngressRequires class as part of the `__init__` method +rather than, for instance, a config-changed event handler, for the relation +changed event to be properly handled. +""" + +import copy +import logging +from typing import Dict + +from ops.charm import CharmBase, CharmEvents, RelationBrokenEvent, RelationChangedEvent +from ops.framework import EventBase, EventSource, Object +from ops.model import BlockedStatus + +INGRESS_RELATION_NAME = "ingress" +INGRESS_PROXY_RELATION_NAME = "ingress-proxy" + +# The unique Charmhub library identifier, never change it +LIBID = "db0af4367506491c91663468fb5caa4c" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 16 + +LOGGER = logging.getLogger(__name__) + +REQUIRED_INGRESS_RELATION_FIELDS = {"service-hostname", "service-name", "service-port"} + +OPTIONAL_INGRESS_RELATION_FIELDS = { + "additional-hostnames", + "backend-protocol", + "limit-rps", + "limit-whitelist", + "max-body-size", + "owasp-modsecurity-crs", + "owasp-modsecurity-custom-rules", + "path-routes", + "retry-errors", + "rewrite-target", + "rewrite-enabled", + "service-namespace", + "session-cookie-max-age", + "tls-secret-name", +} + +RELATION_INTERFACES_MAPPINGS = { + "service-hostname": "host", + "service-name": "name", + "service-namespace": "model", + "service-port": "port", +} +RELATION_INTERFACES_MAPPINGS_VALUES = set(RELATION_INTERFACES_MAPPINGS.values()) + + +class IngressAvailableEvent(EventBase): + """IngressAvailableEvent custom event. + + This event indicates the Ingress provider is available. + """ + + +class IngressProxyAvailableEvent(EventBase): + """IngressProxyAvailableEvent custom event. + + This event indicates the IngressProxy provider is available. + """ + + +class IngressBrokenEvent(RelationBrokenEvent): + """IngressBrokenEvent custom event. + + This event indicates the Ingress provider is broken. + """ + + +class IngressCharmEvents(CharmEvents): + """Custom charm events. + + Attrs: + ingress_available: Event to indicate that Ingress is available. + ingress_proxy_available: Event to indicate that IngressProxy is available. + ingress_broken: Event to indicate that Ingress is broken. + """ + + ingress_available = EventSource(IngressAvailableEvent) + ingress_proxy_available = EventSource(IngressProxyAvailableEvent) + ingress_broken = EventSource(IngressBrokenEvent) + + +class IngressRequires(Object): + """This class defines the functionality for the 'requires' side of the 'ingress' relation. + + Hook events observed: + - relation-changed + + Attrs: + model: Juju model where the charm is deployed. + config_dict: Contains all the configuration options for Ingress. + """ + + def __init__(self, charm: CharmBase, config_dict: Dict) -> None: + """Init function for the IngressRequires class. 
+ + Args: + charm: The charm that requires the ingress relation. + config_dict: Contains all the configuration options for Ingress. + """ + super().__init__(charm, INGRESS_RELATION_NAME) + + self.framework.observe( + charm.on[INGRESS_RELATION_NAME].relation_changed, self._on_relation_changed + ) + + # Set default values. + default_relation_fields = { + "service-namespace": self.model.name, + } + config_dict.update( + (key, value) + for key, value in default_relation_fields.items() + if key not in config_dict or not config_dict[key] + ) + + self.config_dict = self._convert_to_relation_interface(config_dict) + + @staticmethod + def _convert_to_relation_interface(config_dict: Dict) -> Dict: + """Create a new relation dict that conforms with charm-relation-interfaces. + + Args: + config_dict: Ingress configuration that doesn't conform with charm-relation-interfaces. + + Returns: + The Ingress configuration conforming with charm-relation-interfaces. + """ + config_dict = copy.copy(config_dict) + config_dict.update( + (key, config_dict[old_key]) + for old_key, key in RELATION_INTERFACES_MAPPINGS.items() + if old_key in config_dict and config_dict[old_key] + ) + return config_dict + + def _config_dict_errors(self, config_dict: Dict, update_only: bool = False) -> bool: + """Check our config dict for errors. + + Args: + config_dict: Contains all the configuration options for Ingress. + update_only: If the charm needs to update only existing keys. + + Returns: + If we need to update the config dict or not. + """ + blocked_message = "Error in ingress relation, check `juju debug-log`" + unknown = [ + config_key + for config_key in config_dict + if config_key + not in REQUIRED_INGRESS_RELATION_FIELDS + | OPTIONAL_INGRESS_RELATION_FIELDS + | RELATION_INTERFACES_MAPPINGS_VALUES + ] + if unknown: + LOGGER.error( + "Ingress relation error, unknown key(s) in config dictionary found: %s", + ", ".join(unknown), + ) + self.model.unit.status = BlockedStatus(blocked_message) + return True + if not update_only: + missing = tuple( + config_key + for config_key in REQUIRED_INGRESS_RELATION_FIELDS + if config_key not in self.config_dict + ) + if missing: + LOGGER.error( + "Ingress relation error, missing required key(s) in config dictionary: %s", + ", ".join(sorted(missing)), + ) + self.model.unit.status = BlockedStatus(blocked_message) + return True + return False + + def _on_relation_changed(self, event: RelationChangedEvent) -> None: + """Handle the relation-changed event. + + Args: + event: Event triggering the relation-changed hook for the relation. + """ + # `self.unit` isn't available here, so use `self.model.unit`. + if self.model.unit.is_leader(): + if self._config_dict_errors(config_dict=self.config_dict): + return + event.relation.data[self.model.app].update( + (key, str(self.config_dict[key])) for key in self.config_dict + ) + + def update_config(self, config_dict: Dict) -> None: + """Allow for updates to relation. + + Args: + config_dict: Contains all the configuration options for Ingress. + + Attrs: + config_dict: Contains all the configuration options for Ingress. 
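+
+        Example (mirroring the call shown in the module docstring):
+            self.ingress.update_config(
+                {"service-hostname": self.config["external_hostname"]}
+            )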
+ """ + if self.model.unit.is_leader(): + self.config_dict = self._convert_to_relation_interface(config_dict) + if self._config_dict_errors(self.config_dict, update_only=True): + return + relation = self.model.get_relation(INGRESS_RELATION_NAME) + if relation: + for key in self.config_dict: + relation.data[self.model.app][key] = str(self.config_dict[key]) + + +class IngressBaseProvides(Object): + """Parent class for IngressProvides and IngressProxyProvides. + + Attrs: + model: Juju model where the charm is deployed. + """ + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + """Init function for the IngressProxyProvides class. + + Args: + charm: The charm that provides the ingress-proxy relation. + """ + super().__init__(charm, relation_name) + self.charm = charm + + def _on_relation_changed(self, event: RelationChangedEvent) -> None: + """Handle a change to the ingress/ingress-proxy relation. + + Confirm we have the fields we expect to receive. + + Args: + event: Event triggering the relation-changed hook for the relation. + """ + # `self.unit` isn't available here, so use `self.model.unit`. + if not self.model.unit.is_leader(): + return + + relation_name = event.relation.name + + assert event.app is not None # nosec + if not event.relation.data[event.app]: + LOGGER.info( + "%s hasn't finished configuring, waiting until relation is changed again.", + relation_name, + ) + return + + ingress_data = { + field: event.relation.data[event.app].get(field) + for field in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS + } + + missing_fields = sorted( + field for field in REQUIRED_INGRESS_RELATION_FIELDS if ingress_data.get(field) is None + ) + + if missing_fields: + LOGGER.warning( + "Missing required data fields for %s relation: %s", + relation_name, + ", ".join(missing_fields), + ) + self.model.unit.status = BlockedStatus( + f"Missing fields for {relation_name}: {', '.join(missing_fields)}" + ) + + if relation_name == INGRESS_RELATION_NAME: + # Conform to charm-relation-interfaces. + if "name" in ingress_data and "port" in ingress_data: + name = ingress_data["name"] + port = ingress_data["port"] + else: + name = ingress_data["service-name"] + port = ingress_data["service-port"] + event.relation.data[self.model.app]["url"] = f"http://{name}:{port}/" + + # Create an event that our charm can use to decide it's okay to + # configure the ingress. + self.charm.on.ingress_available.emit() + elif relation_name == INGRESS_PROXY_RELATION_NAME: + self.charm.on.ingress_proxy_available.emit() + + +class IngressProvides(IngressBaseProvides): + """Class containing the functionality for the 'provides' side of the 'ingress' relation. + + Attrs: + charm: The charm that provides the ingress relation. + + Hook events observed: + - relation-changed + """ + + def __init__(self, charm: CharmBase) -> None: + """Init function for the IngressProvides class. + + Args: + charm: The charm that provides the ingress relation. + """ + super().__init__(charm, INGRESS_RELATION_NAME) + # Observe the relation-changed hook event and bind + # self.on_relation_changed() to handle the event. + self.framework.observe( + charm.on[INGRESS_RELATION_NAME].relation_changed, self._on_relation_changed + ) + self.framework.observe( + charm.on[INGRESS_RELATION_NAME].relation_broken, self._on_relation_broken + ) + + def _on_relation_broken(self, event: RelationBrokenEvent) -> None: + """Handle a relation-broken event in the ingress relation. 
+ + Args: + event: Event triggering the relation-broken hook for the relation. + """ + if not self.model.unit.is_leader(): + return + + # Create an event that our charm can use to remove the ingress resource. + self.charm.on.ingress_broken.emit(event.relation) + + +class IngressProxyProvides(IngressBaseProvides): + """Class containing the functionality for the 'provides' side of the 'ingress-proxy' relation. + + Attrs: + charm: The charm that provides the ingress-proxy relation. + + Hook events observed: + - relation-changed + """ + + def __init__(self, charm: CharmBase) -> None: + """Init function for the IngressProxyProvides class. + + Args: + charm: The charm that provides the ingress-proxy relation. + """ + super().__init__(charm, INGRESS_PROXY_RELATION_NAME) + # Observe the relation-changed hook event and bind + # self.on_relation_changed() to handle the event. + self.framework.observe( + charm.on[INGRESS_PROXY_RELATION_NAME].relation_changed, self._on_relation_changed + ) diff --git a/charms/heat-k8s/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/charms/heat-k8s/lib/charms/observability_libs/v1/kubernetes_service_patch.py new file mode 100644 index 00000000..64dd13ce --- /dev/null +++ b/charms/heat-k8s/lib/charms/observability_libs/v1/kubernetes_service_patch.py @@ -0,0 +1,341 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. + +"""# KubernetesServicePatch Library. + +This library is designed to enable developers to more simply patch the Kubernetes Service created +by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a +service named after the application in the namespace (named after the Juju model). This service by +default contains a "placeholder" port, which is 65536/TCP. + +When modifying the default set of resources managed by Juju, one must consider the lifecycle of the +charm. In this case, any modifications to the default service (created during deployment), will be +overwritten during a charm upgrade. + +When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm` +events which applies the patch to the cluster. This should ensure that the service ports are +correct throughout the charm's life. + +The constructor simply takes a reference to the parent charm, and a list of +[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the +service. For information regarding the `lightkube` `ServicePort` model, please visit the +`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport). + +Optionally, a name of the service (in case service name needs to be patched as well), labels, +selectors, and annotations can be provided as keyword arguments. + +## Getting Started + +To get started using the library, you just need to fetch the library using `charmcraft`. **Note +that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.** + +```shell +cd some-charm +charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch +cat << EOF >> requirements.txt +lightkube +lightkube-models +EOF +``` + +Then, to initialise the library: + +For `ClusterIP` services: + +```python +# ... +from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch +from lightkube.models.core_v1 import ServicePort + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... 
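+        # Declare the port(s) the patched Kubernetes Service should expose;
+        # 443 is just the value used in this example.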
+ port = ServicePort(443, name=f"{self.app.name}") + self.service_patcher = KubernetesServicePatch(self, [port]) + # ... +``` + +For `LoadBalancer`/`NodePort` services: + +```python +# ... +from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch +from lightkube.models.core_v1 import ServicePort + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666) + self.service_patcher = KubernetesServicePatch( + self, [port], "LoadBalancer" + ) + # ... +``` + +Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"` + +```python +# ... +from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch +from lightkube.models.core_v1 import ServicePort + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP") + udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP") + sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP") + self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp]) + # ... +``` + +Bound with custom events by providing `refresh_event` argument: +For example, you would like to have a configurable port in your charm and want to apply +service patch every time charm config is changed. + +```python +from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch +from lightkube.models.core_v1 import ServicePort + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + port = ServicePort(int(self.config["charm-config-port"]), name=f"{self.app.name}") + self.service_patcher = KubernetesServicePatch( + self, + [port], + refresh_event=self.on.config_changed + ) + # ... +``` + +Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library +does not try to make any API calls, or open any files during testing that are unlikely to be +present, and could break your tests. The easiest way to do this is during your test `setUp`: + +```python +# ... + +@patch("charm.KubernetesServicePatch", lambda x, y: None) +def setUp(self, *unused): + self.harness = Harness(SomeCharm) + # ... 
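+    # A typical continuation (not part of this library's example, shown only
+    # as an illustration of the usual ops.testing pattern):
+    # self.addCleanup(self.harness.cleanup)
+    # self.harness.begin()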
+``` +""" + +import logging +from types import MethodType +from typing import List, Literal, Optional, Union + +from lightkube import ApiError, Client +from lightkube.core import exceptions +from lightkube.models.core_v1 import ServicePort, ServiceSpec +from lightkube.models.meta_v1 import ObjectMeta +from lightkube.resources.core_v1 import Service +from lightkube.types import PatchType +from ops.charm import CharmBase +from ops.framework import BoundEvent, Object + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "0042f86d0a874435adef581806cddbbb" + +# Increment this major API version when introducing breaking changes +LIBAPI = 1 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 7 + +ServiceType = Literal["ClusterIP", "LoadBalancer"] + + +class KubernetesServicePatch(Object): + """A utility for patching the Kubernetes service set up by Juju.""" + + def __init__( + self, + charm: CharmBase, + ports: List[ServicePort], + service_name: Optional[str] = None, + service_type: ServiceType = "ClusterIP", + additional_labels: Optional[dict] = None, + additional_selectors: Optional[dict] = None, + additional_annotations: Optional[dict] = None, + *, + refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, + ): + """Constructor for KubernetesServicePatch. + + Args: + charm: the charm that is instantiating the library. + ports: a list of ServicePorts + service_name: allows setting custom name to the patched service. If none given, + application name will be used. + service_type: desired type of K8s service. Default value is in line with ServiceSpec's + default value. + additional_labels: Labels to be added to the kubernetes service (by default only + "app.kubernetes.io/name" is set to the service name) + additional_selectors: Selectors to be added to the kubernetes service (by default only + "app.kubernetes.io/name" is set to the service name) + additional_annotations: Annotations to be added to the kubernetes service. + refresh_event: an optional bound event or list of bound events which + will be observed to re-apply the patch (e.g. on port change). + The `install` and `upgrade-charm` events would be observed regardless. + """ + super().__init__(charm, "kubernetes-service-patch") + self.charm = charm + self.service_name = service_name if service_name else self._app + self.service = self._service_object( + ports, + service_name, + service_type, + additional_labels, + additional_selectors, + additional_annotations, + ) + + # Make mypy type checking happy that self._patch is a method + assert isinstance(self._patch, MethodType) + # Ensure this patch is applied during the 'install' and 'upgrade-charm' events + self.framework.observe(charm.on.install, self._patch) + self.framework.observe(charm.on.upgrade_charm, self._patch) + self.framework.observe(charm.on.update_status, self._patch) + + # apply user defined events + if refresh_event: + if not isinstance(refresh_event, list): + refresh_event = [refresh_event] + + for evt in refresh_event: + self.framework.observe(evt, self._patch) + + def _service_object( + self, + ports: List[ServicePort], + service_name: Optional[str] = None, + service_type: ServiceType = "ClusterIP", + additional_labels: Optional[dict] = None, + additional_selectors: Optional[dict] = None, + additional_annotations: Optional[dict] = None, + ) -> Service: + """Creates a valid Service representation. 
+ + Args: + ports: a list of ServicePorts + service_name: allows setting custom name to the patched service. If none given, + application name will be used. + service_type: desired type of K8s service. Default value is in line with ServiceSpec's + default value. + additional_labels: Labels to be added to the kubernetes service (by default only + "app.kubernetes.io/name" is set to the service name) + additional_selectors: Selectors to be added to the kubernetes service (by default only + "app.kubernetes.io/name" is set to the service name) + additional_annotations: Annotations to be added to the kubernetes service. + + Returns: + Service: A valid representation of a Kubernetes Service with the correct ports. + """ + if not service_name: + service_name = self._app + labels = {"app.kubernetes.io/name": self._app} + if additional_labels: + labels.update(additional_labels) + selector = {"app.kubernetes.io/name": self._app} + if additional_selectors: + selector.update(additional_selectors) + return Service( + apiVersion="v1", + kind="Service", + metadata=ObjectMeta( + namespace=self._namespace, + name=service_name, + labels=labels, + annotations=additional_annotations, # type: ignore[arg-type] + ), + spec=ServiceSpec( + selector=selector, + ports=ports, + type=service_type, + ), + ) + + def _patch(self, _) -> None: + """Patch the Kubernetes service created by Juju to map the correct port. + + Raises: + PatchFailed: if patching fails due to lack of permissions, or otherwise. + """ + try: + client = Client() + except exceptions.ConfigError as e: + logger.warning("Error creating k8s client: %s", e) + return + + try: + if self._is_patched(client): + return + if self.service_name != self._app: + self._delete_and_create_service(client) + client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE) + except ApiError as e: + if e.status.code == 403: + logger.error("Kubernetes service patch failed: `juju trust` this application.") + else: + logger.error("Kubernetes service patch failed: %s", str(e)) + else: + logger.info("Kubernetes service '%s' patched successfully", self._app) + + def _delete_and_create_service(self, client: Client): + service = client.get(Service, self._app, namespace=self._namespace) + service.metadata.name = self.service_name # type: ignore[attr-defined] + service.metadata.resourceVersion = service.metadata.uid = None # type: ignore[attr-defined] # noqa: E501 + client.delete(Service, self._app, namespace=self._namespace) + client.create(service) + + def is_patched(self) -> bool: + """Reports if the service patch has been applied. + + Returns: + bool: A boolean indicating if the service patch has been applied. 
+ """ + client = Client() + return self._is_patched(client) + + def _is_patched(self, client: Client) -> bool: + # Get the relevant service from the cluster + try: + service = client.get(Service, name=self.service_name, namespace=self._namespace) + except ApiError as e: + if e.status.code == 404 and self.service_name != self._app: + return False + logger.error("Kubernetes service get failed: %s", str(e)) + raise + + # Construct a list of expected ports, should the patch be applied + expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports] + # Construct a list in the same manner, using the fetched service + fetched_ports = [ + (p.port, p.targetPort) for p in service.spec.ports # type: ignore[attr-defined] + ] # noqa: E501 + return expected_ports == fetched_ports + + @property + def _app(self) -> str: + """Name of the current Juju application. + + Returns: + str: A string containing the name of the current Juju application. + """ + return self.charm.app.name + + @property + def _namespace(self) -> str: + """The Kubernetes namespace we're running in. + + Returns: + str: A string containing the name of the current Kubernetes namespace. + """ + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: + return f.read().strip() diff --git a/charms/heat-k8s/lib/charms/rabbitmq_k8s/v0/rabbitmq.py b/charms/heat-k8s/lib/charms/rabbitmq_k8s/v0/rabbitmq.py new file mode 100644 index 00000000..c7df2409 --- /dev/null +++ b/charms/heat-k8s/lib/charms/rabbitmq_k8s/v0/rabbitmq.py @@ -0,0 +1,286 @@ +"""RabbitMQProvides and Requires module. + +This library contains the Requires and Provides classes for handling +the rabbitmq interface. + +Import `RabbitMQRequires` in your charm, with the charm object and the +relation name: + - self + - "amqp" + +Also provide two additional parameters to the charm object: + - username + - vhost + +Two events are also available to respond to: + - connected + - ready + - goneaway + +A basic example showing the usage of this relation follows: + +``` +from charms.rabbitmq_k8s.v0.rabbitmq import RabbitMQRequires + +class RabbitMQClientCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + # RabbitMQ Requires + self.amqp = RabbitMQRequires( + self, "amqp", + username="myusername", + vhost="vhostname" + ) + self.framework.observe( + self.amqp.on.connected, self._on_amqp_connected) + self.framework.observe( + self.amqp.on.ready, self._on_amqp_ready) + self.framework.observe( + self.amqp.on.goneaway, self._on_amqp_goneaway) + + def _on_amqp_connected(self, event): + '''React to the RabbitMQ connected event. + + This event happens when n RabbitMQ relation is added to the + model before credentials etc have been provided. + ''' + # Do something before the relation is complete + pass + + def _on_amqp_ready(self, event): + '''React to the RabbitMQ ready event. + + The RabbitMQ interface will use the provided username and vhost for the + request to the rabbitmq server. + ''' + # RabbitMQ Relation is ready. Do something with the completed relation. + pass + + def _on_amqp_goneaway(self, event): + '''React to the RabbitMQ goneaway event. + + This event happens when an RabbitMQ relation is removed. + ''' + # RabbitMQ Relation has goneaway. 
shutdown services or suchlike + pass +``` +""" + +# The unique Charmhub library identifier, never change it +LIBID = "45622352791142fd9cf87232e3bd6f2a" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 1 + +import logging + +from ops.framework import ( + StoredState, + EventBase, + ObjectEvents, + EventSource, + Object, +) + +from ops.model import Relation + +from typing import List + +logger = logging.getLogger(__name__) + + +class RabbitMQConnectedEvent(EventBase): + """RabbitMQ connected Event.""" + + pass + + +class RabbitMQReadyEvent(EventBase): + """RabbitMQ ready for use Event.""" + + pass + + +class RabbitMQGoneAwayEvent(EventBase): + """RabbitMQ relation has gone-away Event""" + + pass + + +class RabbitMQServerEvents(ObjectEvents): + """Events class for `on`""" + + connected = EventSource(RabbitMQConnectedEvent) + ready = EventSource(RabbitMQReadyEvent) + goneaway = EventSource(RabbitMQGoneAwayEvent) + + +class RabbitMQRequires(Object): + """ + RabbitMQRequires class + """ + + on = RabbitMQServerEvents() + + def __init__(self, charm, relation_name: str, username: str, vhost: str): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.username = username + self.vhost = vhost + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_amqp_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_amqp_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_departed, + self._on_amqp_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_amqp_relation_broken, + ) + + def _on_amqp_relation_joined(self, event): + """RabbitMQ relation joined.""" + logging.debug("RabbitMQRabbitMQRequires on_joined") + self.on.connected.emit() + self.request_access(self.username, self.vhost) + + def _on_amqp_relation_changed(self, event): + """RabbitMQ relation changed.""" + logging.debug("RabbitMQRabbitMQRequires on_changed/departed") + if self.password: + self.on.ready.emit() + + def _on_amqp_relation_broken(self, event): + """RabbitMQ relation broken.""" + logging.debug("RabbitMQRabbitMQRequires on_broken") + self.on.goneaway.emit() + + @property + def _amqp_rel(self) -> Relation: + """The RabbitMQ relation.""" + return self.framework.model.get_relation(self.relation_name) + + @property + def password(self) -> str: + """Return the RabbitMQ password from the server side of the relation.""" + return self._amqp_rel.data[self._amqp_rel.app].get("password") + + @property + def hostname(self) -> str: + """Return the hostname from the RabbitMQ relation""" + return self._amqp_rel.data[self._amqp_rel.app].get("hostname") + + @property + def ssl_port(self) -> str: + """Return the SSL port from the RabbitMQ relation""" + return self._amqp_rel.data[self._amqp_rel.app].get("ssl_port") + + @property + def ssl_ca(self) -> str: + """Return the SSL port from the RabbitMQ relation""" + return self._amqp_rel.data[self._amqp_rel.app].get("ssl_ca") + + @property + def hostnames(self) -> List[str]: + """Return a list of remote RMQ hosts from the RabbitMQ relation""" + _hosts = [] + for unit in self._amqp_rel.units: + _hosts.append(self._amqp_rel.data[unit].get("ingress-address")) + return _hosts + + def request_access(self, username: str, vhost: 
str) -> None: + """Request access to the RabbitMQ server.""" + if self.model.unit.is_leader(): + logging.debug("Requesting RabbitMQ user and vhost") + self._amqp_rel.data[self.charm.app]["username"] = username + self._amqp_rel.data[self.charm.app]["vhost"] = vhost + + +class HasRabbitMQClientsEvent(EventBase): + """Has RabbitMQClients Event.""" + + pass + + +class ReadyRabbitMQClientsEvent(EventBase): + """RabbitMQClients Ready Event.""" + + pass + + +class RabbitMQClientEvents(ObjectEvents): + """Events class for `on`""" + + has_amqp_clients = EventSource(HasRabbitMQClientsEvent) + ready_amqp_clients = EventSource(ReadyRabbitMQClientsEvent) + + +class RabbitMQProvides(Object): + """ + RabbitMQProvides class + """ + + on = RabbitMQClientEvents() + + def __init__(self, charm, relation_name, callback): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.callback = callback + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_amqp_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_amqp_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_amqp_relation_broken, + ) + + def _on_amqp_relation_joined(self, event): + """Handle RabbitMQ joined.""" + logging.debug("RabbitMQRabbitMQProvides on_joined data={}" + .format(event.relation.data[event.relation.app])) + self.on.has_amqp_clients.emit() + + def _on_amqp_relation_changed(self, event): + """Handle RabbitMQ changed.""" + logging.debug("RabbitMQRabbitMQProvides on_changed data={}" + .format(event.relation.data[event.relation.app])) + # Validate data on the relation + if self.username(event) and self.vhost(event): + self.on.ready_amqp_clients.emit() + if self.charm.unit.is_leader(): + self.callback(event, self.username(event), self.vhost(event)) + else: + logging.warning("Received RabbitMQ changed event without the " + "expected keys ('username', 'vhost') in the " + "application data bag. Incompatible charm in " + "other end of relation?") + + def _on_amqp_relation_broken(self, event): + """Handle RabbitMQ broken.""" + logging.debug("RabbitMQRabbitMQProvides on_departed") + # TODO clear data on the relation + + def username(self, event): + """Return the RabbitMQ username from the client side of the relation.""" + return event.relation.data[event.relation.app].get("username") + + def vhost(self, event): + """Return the RabbitMQ vhost from the client side of the relation.""" + return event.relation.data[event.relation.app].get("vhost") diff --git a/charms/heat-k8s/lib/charms/traefik_k8s/v1/ingress.py b/charms/heat-k8s/lib/charms/traefik_k8s/v1/ingress.py new file mode 100644 index 00000000..b31a24d7 --- /dev/null +++ b/charms/heat-k8s/lib/charms/traefik_k8s/v1/ingress.py @@ -0,0 +1,562 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. + +r"""# Interface Library for ingress. + +This library wraps relation endpoints using the `ingress` interface +and provides a Python API for both requesting and providing per-application +ingress, with load-balancing occurring across all units. + +## Getting Started + +To get started using the library, you just need to fetch the library using `charmcraft`. 
+ +```shell +cd some-charm +charmcraft fetch-lib charms.traefik_k8s.v1.ingress +``` + +In the `metadata.yaml` of the charm, add the following: + +```yaml +requires: + ingress: + interface: ingress + limit: 1 +``` + +Then, to initialise the library: + +```python +from charms.traefik_k8s.v1.ingress import (IngressPerAppRequirer, + IngressPerAppReadyEvent, IngressPerAppRevokedEvent) + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + self.ingress = IngressPerAppRequirer(self, port=80) + # The following event is triggered when the ingress URL to be used + # by this deployment of the `SomeCharm` is ready (or changes). + self.framework.observe( + self.ingress.on.ready, self._on_ingress_ready + ) + self.framework.observe( + self.ingress.on.revoked, self._on_ingress_revoked + ) + + def _on_ingress_ready(self, event: IngressPerAppReadyEvent): + logger.info("This app's ingress URL: %s", event.url) + + def _on_ingress_revoked(self, event: IngressPerAppRevokedEvent): + logger.info("This app no longer has ingress") +""" + +import logging +import socket +import typing +from typing import Any, Dict, Optional, Tuple, Union + +import yaml +from ops.charm import CharmBase, RelationBrokenEvent, RelationEvent +from ops.framework import EventSource, Object, ObjectEvents, StoredState +from ops.model import ModelError, Relation + +# The unique Charmhub library identifier, never change it +LIBID = "e6de2a5cd5b34422a204668f3b8f90d2" + +# Increment this major API version when introducing breaking changes +LIBAPI = 1 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 12 + +DEFAULT_RELATION_NAME = "ingress" +RELATION_INTERFACE = "ingress" + +log = logging.getLogger(__name__) + +try: + import jsonschema + + DO_VALIDATION = True +except ModuleNotFoundError: + log.warning( + "The `ingress` library needs the `jsonschema` package to be able " + "to do runtime data validation; without it, it will still work but validation " + "will be disabled. \n" + "It is recommended to add `jsonschema` to the 'requirements.txt' of your charm, " + "which will enable this feature." + ) + DO_VALIDATION = False + +INGRESS_REQUIRES_APP_SCHEMA = { + "type": "object", + "properties": { + "model": {"type": "string"}, + "name": {"type": "string"}, + "host": {"type": "string"}, + "port": {"type": "string"}, + "strip-prefix": {"type": "string"}, + }, + "required": ["model", "name", "host", "port"], +} + +INGRESS_PROVIDES_APP_SCHEMA = { + "type": "object", + "properties": { + "ingress": {"type": "object", "properties": {"url": {"type": "string"}}}, + }, + "required": ["ingress"], +} + +try: + from typing import TypedDict +except ImportError: + from typing_extensions import TypedDict # py35 compat + +# Model of the data a unit implementing the requirer will need to provide. +RequirerData = TypedDict( + "RequirerData", + {"model": str, "name": str, "host": str, "port": int, "strip-prefix": bool}, + total=False, +) +# Provider ingress data model. +ProviderIngressData = TypedDict("ProviderIngressData", {"url": str}) +# Provider application databag model. +ProviderApplicationData = TypedDict("ProviderApplicationData", {"ingress": ProviderIngressData}) # type: ignore + + +def _validate_data(data, schema): + """Checks whether `data` matches `schema`. + + Will raise DataValidationError if the data is not valid, else return None. 
+ """ + if not DO_VALIDATION: + return + try: + jsonschema.validate(instance=data, schema=schema) + except jsonschema.ValidationError as e: + raise DataValidationError(data, schema) from e + + +class DataValidationError(RuntimeError): + """Raised when data validation fails on IPU relation data.""" + + +class _IngressPerAppBase(Object): + """Base class for IngressPerUnit interface classes.""" + + def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME): + super().__init__(charm, relation_name) + + self.charm: CharmBase = charm + self.relation_name = relation_name + self.app = self.charm.app + self.unit = self.charm.unit + + observe = self.framework.observe + rel_events = charm.on[relation_name] + observe(rel_events.relation_created, self._handle_relation) + observe(rel_events.relation_joined, self._handle_relation) + observe(rel_events.relation_changed, self._handle_relation) + observe(rel_events.relation_broken, self._handle_relation_broken) + observe(charm.on.leader_elected, self._handle_upgrade_or_leader) # type: ignore + observe(charm.on.upgrade_charm, self._handle_upgrade_or_leader) # type: ignore + + @property + def relations(self): + """The list of Relation instances associated with this endpoint.""" + return list(self.charm.model.relations[self.relation_name]) + + def _handle_relation(self, event): + """Subclasses should implement this method to handle a relation update.""" + pass + + def _handle_relation_broken(self, event): + """Subclasses should implement this method to handle a relation breaking.""" + pass + + def _handle_upgrade_or_leader(self, event): + """Subclasses should implement this method to handle upgrades or leadership change.""" + pass + + +class _IPAEvent(RelationEvent): + __args__ = () # type: Tuple[str, ...] 
+ __optional_kwargs__ = {} # type: Dict[str, Any] + + @classmethod + def __attrs__(cls): + return cls.__args__ + tuple(cls.__optional_kwargs__.keys()) + + def __init__(self, handle, relation, *args, **kwargs): + super().__init__(handle, relation) + + if not len(self.__args__) == len(args): + raise TypeError("expected {} args, got {}".format(len(self.__args__), len(args))) + + for attr, obj in zip(self.__args__, args): + setattr(self, attr, obj) + for attr, default in self.__optional_kwargs__.items(): + obj = kwargs.get(attr, default) + setattr(self, attr, obj) + + def snapshot(self): + dct = super().snapshot() + for attr in self.__attrs__(): + obj = getattr(self, attr) + try: + dct[attr] = obj + except ValueError as e: + raise ValueError( + "cannot automagically serialize {}: " + "override this method and do it " + "manually.".format(obj) + ) from e + + return dct + + def restore(self, snapshot) -> None: + super().restore(snapshot) + for attr, obj in snapshot.items(): + setattr(self, attr, obj) + + +class IngressPerAppDataProvidedEvent(_IPAEvent): + """Event representing that ingress data has been provided for an app.""" + + __args__ = ("name", "model", "port", "host", "strip_prefix") + + if typing.TYPE_CHECKING: + name = None # type: Optional[str] + model = None # type: Optional[str] + port = None # type: Optional[str] + host = None # type: Optional[str] + strip_prefix = False # type: bool + + +class IngressPerAppDataRemovedEvent(RelationEvent): + """Event representing that ingress data has been removed for an app.""" + + +class IngressPerAppProviderEvents(ObjectEvents): + """Container for IPA Provider events.""" + + data_provided = EventSource(IngressPerAppDataProvidedEvent) + data_removed = EventSource(IngressPerAppDataRemovedEvent) + + +class IngressPerAppProvider(_IngressPerAppBase): + """Implementation of the provider of ingress.""" + + on = IngressPerAppProviderEvents() # type: ignore + + def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME): + """Constructor for IngressPerAppProvider. + + Args: + charm: The charm that is instantiating the instance. + relation_name: The name of the relation endpoint to bind to + (defaults to "ingress"). + """ + super().__init__(charm, relation_name) + + def _handle_relation(self, event): + # created, joined or changed: if remote side has sent the required data: + # notify listeners. + if self.is_ready(event.relation): + data = self._get_requirer_data(event.relation) + self.on.data_provided.emit( # type: ignore + event.relation, + data["name"], + data["model"], + data["port"], + data["host"], + data.get("strip-prefix", False), + ) + + def _handle_relation_broken(self, event): + self.on.data_removed.emit(event.relation) # type: ignore + + def wipe_ingress_data(self, relation: Relation): + """Clear ingress data from relation.""" + assert self.unit.is_leader(), "only leaders can do this" + try: + relation.data + except ModelError as e: + log.warning( + "error {} accessing relation data for {!r}. " + "Probably a ghost of a dead relation is still " + "lingering around.".format(e, relation.name) + ) + return + del relation.data[self.app]["ingress"] + + def _get_requirer_data(self, relation: Relation) -> RequirerData: # type: ignore + """Fetch and validate the requirer's app databag. + + For convenience, we convert 'port' to integer. + """ + if not relation.app or not relation.app.name: # type: ignore + # Handle edge case where remote app name can be missing, e.g., + # relation_broken events. 
+ # FIXME https://github.com/canonical/traefik-k8s-operator/issues/34 + return {} + + databag = relation.data[relation.app] + remote_data = {} # type: Dict[str, Union[int, str]] + for k in ("port", "host", "model", "name", "mode", "strip-prefix"): + v = databag.get(k) + if v is not None: + remote_data[k] = v + _validate_data(remote_data, INGRESS_REQUIRES_APP_SCHEMA) + remote_data["port"] = int(remote_data["port"]) + remote_data["strip-prefix"] = bool(remote_data.get("strip-prefix", False)) + return typing.cast(RequirerData, remote_data) + + def get_data(self, relation: Relation) -> RequirerData: # type: ignore + """Fetch the remote app's databag, i.e. the requirer data.""" + return self._get_requirer_data(relation) + + def is_ready(self, relation: Optional[Relation] = None): + """The Provider is ready if the requirer has sent valid data.""" + if not relation: + return any(map(self.is_ready, self.relations)) + + try: + return bool(self._get_requirer_data(relation)) + except DataValidationError as e: + log.warning("Requirer not ready; validation error encountered: %s" % str(e)) + return False + + def _provided_url(self, relation: Relation) -> ProviderIngressData: # type: ignore + """Fetch and validate this app databag; return the ingress url.""" + if not relation.app or not relation.app.name or not self.unit.is_leader(): # type: ignore + # Handle edge case where remote app name can be missing, e.g., + # relation_broken events. + # Also, only leader units can read own app databags. + # FIXME https://github.com/canonical/traefik-k8s-operator/issues/34 + return typing.cast(ProviderIngressData, {}) # noqa + + # fetch the provider's app databag + raw_data = relation.data[self.app].get("ingress") + if not raw_data: + raise RuntimeError("This application did not `publish_url` yet.") + + ingress: ProviderIngressData = yaml.safe_load(raw_data) + _validate_data({"ingress": ingress}, INGRESS_PROVIDES_APP_SCHEMA) + return ingress + + def publish_url(self, relation: Relation, url: str): + """Publish to the app databag the ingress url.""" + ingress = {"url": url} + ingress_data = {"ingress": ingress} + _validate_data(ingress_data, INGRESS_PROVIDES_APP_SCHEMA) + relation.data[self.app]["ingress"] = yaml.safe_dump(ingress) + + @property + def proxied_endpoints(self): + """Returns the ingress settings provided to applications by this IngressPerAppProvider. 
+ + For example, when this IngressPerAppProvider has provided the + `http://foo.bar/my-model.my-app` URL to the my-app application, the returned dictionary + will be: + + ``` + { + "my-app": { + "url": "http://foo.bar/my-model.my-app" + } + } + ``` + """ + results = {} + + for ingress_relation in self.relations: + assert ( + ingress_relation.app + ), "no app in relation (shouldn't happen)" # for type checker + results[ingress_relation.app.name] = self._provided_url(ingress_relation) + + return results + + +class IngressPerAppReadyEvent(_IPAEvent): + """Event representing that ingress for an app is ready.""" + + __args__ = ("url",) + if typing.TYPE_CHECKING: + url = None # type: Optional[str] + + +class IngressPerAppRevokedEvent(RelationEvent): + """Event representing that ingress for an app has been revoked.""" + + +class IngressPerAppRequirerEvents(ObjectEvents): + """Container for IPA Requirer events.""" + + ready = EventSource(IngressPerAppReadyEvent) + revoked = EventSource(IngressPerAppRevokedEvent) + + +class IngressPerAppRequirer(_IngressPerAppBase): + """Implementation of the requirer of the ingress relation.""" + + on = IngressPerAppRequirerEvents() # type: ignore + + # used to prevent spurious urls to be sent out if the event we're currently + # handling is a relation-broken one. + _stored = StoredState() + + def __init__( + self, + charm: CharmBase, + relation_name: str = DEFAULT_RELATION_NAME, + *, + host: Optional[str] = None, + port: Optional[int] = None, + strip_prefix: bool = False, + ): + """Constructor for IngressRequirer. + + The request args can be used to specify the ingress properties when the + instance is created. If any are set, at least `port` is required, and + they will be sent to the ingress provider as soon as it is available. + All request args must be given as keyword args. + + Args: + charm: the charm that is instantiating the library. + relation_name: the name of the relation endpoint to bind to (defaults to `ingress`); + relation must be of interface type `ingress` and have "limit: 1") + host: Hostname to be used by the ingress provider to address the requiring + application; if unspecified, the default Kubernetes service name will be used. + strip_prefix: configure Traefik to strip the path prefix. + + Request Args: + port: the port of the service + """ + super().__init__(charm, relation_name) + self.charm: CharmBase = charm + self.relation_name = relation_name + self._strip_prefix = strip_prefix + + self._stored.set_default(current_url=None) # type: ignore + + # if instantiated with a port, and we are related, then + # we immediately publish our ingress data to speed up the process. 
+ if port: + self._auto_data = host, port + else: + self._auto_data = None + + def _handle_relation(self, event): + # created, joined or changed: if we have auto data: publish it + self._publish_auto_data(event.relation) + + if self.is_ready(): + # Avoid spurious events, emit only when there is a NEW URL available + new_url = ( + None + if isinstance(event, RelationBrokenEvent) + else self._get_url_from_relation_data() + ) + if self._stored.current_url != new_url: # type: ignore + self._stored.current_url = new_url # type: ignore + self.on.ready.emit(event.relation, new_url) # type: ignore + + def _handle_relation_broken(self, event): + self._stored.current_url = None # type: ignore + self.on.revoked.emit(event.relation) # type: ignore + + def _handle_upgrade_or_leader(self, event): + """On upgrade/leadership change: ensure we publish the data we have.""" + for relation in self.relations: + self._publish_auto_data(relation) + + def is_ready(self): + """The Requirer is ready if the Provider has sent valid data.""" + try: + return bool(self._get_url_from_relation_data()) + except DataValidationError as e: + log.warning("Requirer not ready; validation error encountered: %s" % str(e)) + return False + + def _publish_auto_data(self, relation: Relation): + if self._auto_data and self.unit.is_leader(): + host, port = self._auto_data + self.provide_ingress_requirements(host=host, port=port) + + def provide_ingress_requirements(self, *, host: Optional[str] = None, port: int): + """Publishes the data that Traefik needs to provide ingress. + + NB only the leader unit is supposed to do this. + + Args: + host: Hostname to be used by the ingress provider to address the + requirer unit; if unspecified, FQDN will be used instead + port: the port of the service (required) + """ + # get only the leader to publish the data since we only + # require one unit to publish it -- it will not differ between units, + # unlike in ingress-per-unit. + assert self.unit.is_leader(), "only leaders should do this." + assert self.relation, "no relation" + + if not host: + host = socket.getfqdn() + + data = { + "model": self.model.name, + "name": self.app.name, + "host": host, + "port": str(port), + } + + if self._strip_prefix: + data["strip-prefix"] = "true" + + _validate_data(data, INGRESS_REQUIRES_APP_SCHEMA) + self.relation.data[self.app].update(data) + + @property + def relation(self): + """The established Relation instance, or None.""" + return self.relations[0] if self.relations else None + + def _get_url_from_relation_data(self) -> Optional[str]: + """The full ingress URL to reach the current unit. + + Returns None if the URL isn't available yet. + """ + relation = self.relation + if not relation or not relation.app: + return None + + # fetch the provider's app databag + try: + raw = relation.data.get(relation.app, {}).get("ingress") + except ModelError as e: + log.debug( + f"Error {e} attempting to read remote app data; " + f"probably we are in a relation_departed hook" + ) + return None + + if not raw: + return None + + ingress: ProviderIngressData = yaml.safe_load(raw) + _validate_data({"ingress": ingress}, INGRESS_PROVIDES_APP_SCHEMA) + return ingress["url"] + + @property + def url(self) -> Optional[str]: + """The full ingress URL to reach the current unit. + + Returns None if the URL isn't available yet. 
+ """ + data = self._stored.current_url or self._get_url_from_relation_data() # type: ignore + assert isinstance(data, (str, type(None))) # for static checker + return data diff --git a/charms/heat-k8s/metadata.yaml b/charms/heat-k8s/metadata.yaml new file mode 100644 index 00000000..5ce06997 --- /dev/null +++ b/charms/heat-k8s/metadata.yaml @@ -0,0 +1,51 @@ +name: heat-k8s +summary: OpenStack heat service +maintainer: OpenStack Charmers +description: | + Heat is the main project in the OpenStack Orchestration program. It implements an + orchestration engine to launch multiple composite cloud applications based on + templates in the form of text files that can be treated like code. +version: 3 +bases: + - name: ubuntu + channel: 20.04/stable +tags: +- openstack + +containers: + heat-api: + resource: heat-api-image + heat-api-cfn: + resource: heat-api-cfn-image + +resources: + heat-api-image: + type: oci-image + description: OCI image for OpenStack Heat + # docker.io/kolla/ubuntu-binary-heat-api:yoga + upstream-source: docker.io/kolla/ubuntu-binary-heat-api@sha256:ca80d57606525facb404d8b0374701c02609c2ade5cb7e28ba132e666dd85949 + heat-api-cfn-image: + type: oci-image + description: OCI image for OpenStack Heat CFN + # docker.io/kolla/ubuntu-binary-heat-api-cfn:yoga + upstream-source: docker.io/kolla/ubuntu-binary-heat-api-cfn@sha256:6eec5915066b55696414022c86c42360cdbd4b8b1250e06b470fee25af394b66 + +requires: + database: + interface: mysql_client + limit: 1 + identity-service: + interface: keystone + ingress-internal: + interface: ingress + optional: true + limit: 1 + ingress-public: + interface: ingress + limit: 1 + amqp: + interface: rabbitmq + +peers: + peers: + interface: heat-peer diff --git a/charms/heat-k8s/pyproject.toml b/charms/heat-k8s/pyproject.toml new file mode 100644 index 00000000..3f514427 --- /dev/null +++ b/charms/heat-k8s/pyproject.toml @@ -0,0 +1,39 @@ +# Testing tools configuration +[tool.coverage.run] +branch = true + +[tool.coverage.report] +show_missing = true + +[tool.pytest.ini_options] +minversion = "6.0" +log_cli_level = "INFO" + +# Formatting tools configuration +[tool.black] +line-length = 99 +target-version = ["py38"] + +# Linting tools configuration +[tool.ruff] +line-length = 99 +select = ["E", "W", "F", "C", "N", "D", "I001"] +extend-ignore = [ + "D203", + "D204", + "D213", + "D215", + "D400", + "D404", + "D406", + "D407", + "D408", + "D409", + "D413", +] +ignore = ["E501", "D107"] +extend-exclude = ["__pycache__", "*.egg_info"] +per-file-ignores = {"tests/*" = ["D100","D101","D102","D103","D104"]} + +[tool.ruff.mccabe] +max-complexity = 10 diff --git a/charms/heat-k8s/requirements.txt b/charms/heat-k8s/requirements.txt new file mode 100644 index 00000000..99f36564 --- /dev/null +++ b/charms/heat-k8s/requirements.txt @@ -0,0 +1,8 @@ +ops +jinja2 +git+https://github.com/openstack/charm-ops-sunbeam#egg=ops_sunbeam +lightkube +# These are only needeed if the charm relates to ceph +git+https://github.com/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client +# Charmhelpers is only present as interface_ceph_client uses it. +git+https://github.com/juju/charm-helpers.git#egg=charmhelpers diff --git a/charms/heat-k8s/src/charm.py b/charms/heat-k8s/src/charm.py new file mode 100755 index 00000000..92f9f6e8 --- /dev/null +++ b/charms/heat-k8s/src/charm.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +"""Heat Operator Charm. 
+
+This charm provides Heat services as part of an OpenStack deployment.
+"""
+
+import logging
+
+from ops.framework import StoredState
+from ops.main import main
+
+import ops_sunbeam.charm as sunbeam_charm
+
+logger = logging.getLogger(__name__)
+
+
+class HeatOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
+    """Charm the service."""
+
+    _state = StoredState()
+    service_name = "heat-api"
+    wsgi_admin_script = '/usr/bin/heat-api-wsgi'
+    wsgi_public_script = '/usr/bin/heat-api-wsgi'
+
+    db_sync_cmds = [
+        ['heat-manage', 'db_sync']
+    ]
+
+    @property
+    def service_conf(self) -> str:
+        """Service default configuration file."""
+        return "/etc/heat/heat.conf"
+
+    @property
+    def service_user(self) -> str:
+        """Service user file and directory ownership."""
+        return 'heat'
+
+    @property
+    def service_group(self) -> str:
+        """Service group file and directory ownership."""
+        return 'heat'
+
+    @property
+    def service_endpoints(self):
+        return [
+            {
+                'service_name': 'heat',
+                'type': 'heat',
+                'description': "OpenStack Heat API",
+                'internal_url': f'{self.internal_url}',
+                'public_url': f'{self.public_url}',
+                'admin_url': f'{self.admin_url}'}]
+
+    @property
+    def default_public_ingress_port(self):
+        return 8004
+
+
+if __name__ == "__main__":
+    main(HeatOperatorCharm)
diff --git a/charms/heat-k8s/src/templates/api-paste.ini.j2 b/charms/heat-k8s/src/templates/api-paste.ini.j2
new file mode 100644
index 00000000..220f68d9
--- /dev/null
+++ b/charms/heat-k8s/src/templates/api-paste.ini.j2
@@ -0,0 +1,128 @@
+# heat-api composite
+[composite:heat-api]
+paste.composite_factory = heat.api:root_app_factory
+/: api
+/healthcheck: healthcheck
+
+# heat-api composite for standalone heat
+# ie. uses alternative auth backend that authenticates users against keystone
+# using username and password instead of validating token (which requires
+# an admin/service token).
+# To enable, in heat.conf:
+# [paste_deploy]
+# flavor = standalone
+#
+[composite:heat-api-standalone]
+paste.composite_factory = heat.api:root_app_factory
+/: api
+/healthcheck: healthcheck
+
+# heat-api composite for custom cloud backends
+# i.e.
in heat.conf: +# [paste_deploy] +# flavor = custombackend +# +[composite:heat-api-custombackend] +paste.composite_factory = heat.api:root_app_factory +/: api +/healthcheck: healthcheck + +# To enable, in heat.conf: +# [paste_deploy] +# flavor = noauth +# +[composite:heat-api-noauth] +paste.composite_factory = heat.api:root_app_factory +/: api +/healthcheck: healthcheck + +# heat-api-cfn composite +[composite:heat-api-cfn] +paste.composite_factory = heat.api:root_app_factory +/: api-cfn +/healthcheck: healthcheck + +# heat-api-cfn composite for standalone heat +# relies exclusively on authenticating with ec2 signed requests +[composite:heat-api-cfn-standalone] +paste.composite_factory = heat.api:root_app_factory +/: api-cfn +/healthcheck: healthcheck + +[composite:api] +paste.composite_factory = heat.api:pipeline_factory +default = cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authtoken context osprofiler apiv1app +standalone = cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authpassword context apiv1app +custombackend = cors request_id context faultwrap versionnegotiation custombackendauth apiv1app +noauth = cors request_id faultwrap noauth context http_proxy_to_wsgi versionnegotiation apiv1app + +[composite:api-cfn] +paste.composite_factory = heat.api:pipeline_factory +default = cors request_id http_proxy_to_wsgi cfnversionnegotiation ec2authtoken authtoken context osprofiler apicfnv1app +standalone = cors request_id http_proxy_to_wsgi cfnversionnegotiation ec2authtoken context apicfnv1app + +[app:apiv1app] +paste.app_factory = heat.common.wsgi:app_factory +heat.app_factory = heat.api.openstack.v1:API + +[app:apicfnv1app] +paste.app_factory = heat.common.wsgi:app_factory +heat.app_factory = heat.api.cfn.v1:API + +[app:healthcheck] +paste.app_factory = oslo_middleware:Healthcheck.app_factory + +[filter:versionnegotiation] +paste.filter_factory = heat.common.wsgi:filter_factory +heat.filter_factory = heat.api.openstack:version_negotiation_filter + +[filter:cors] +paste.filter_factory = oslo_middleware.cors:filter_factory +oslo_config_project = heat + +[filter:faultwrap] +paste.filter_factory = heat.common.wsgi:filter_factory +heat.filter_factory = heat.api.openstack:faultwrap_filter + +[filter:cfnversionnegotiation] +paste.filter_factory = heat.common.wsgi:filter_factory +heat.filter_factory = heat.api.cfn:version_negotiation_filter + +[filter:cwversionnegotiation] +paste.filter_factory = heat.common.wsgi:filter_factory + +[filter:context] +paste.filter_factory = heat.common.context:ContextMiddleware_filter_factory + +[filter:ec2authtoken] +paste.filter_factory = heat.api.aws.ec2token:EC2Token_filter_factory + +[filter:http_proxy_to_wsgi] +paste.filter_factory = oslo_middleware:HTTPProxyToWSGI.factory + +# Middleware to set auth_url header appropriately +[filter:authurl] +paste.filter_factory = heat.common.auth_url:filter_factory + +# Auth middleware that validates token against keystone +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory + +# Auth middleware that validates username/password against keystone +[filter:authpassword] +paste.filter_factory = heat.common.auth_password:filter_factory + +# Auth middleware that validates against custom backend +[filter:custombackendauth] +paste.filter_factory = heat.common.custom_backend_auth:filter_factory + +# Auth middleware that accepts any auth +[filter:noauth] +paste.filter_factory = heat.common.noauth:filter_factory + +# Middleware to set 
x-openstack-request-id in http response header +[filter:request_id] +paste.filter_factory = oslo_middleware.request_id:RequestId.factory + +[filter:osprofiler] +paste.filter_factory = osprofiler.web:WsgiMiddleware.factory diff --git a/charms/heat-k8s/src/templates/ceph.conf.j2 b/charms/heat-k8s/src/templates/ceph.conf.j2 new file mode 100644 index 00000000..c293ae90 --- /dev/null +++ b/charms/heat-k8s/src/templates/ceph.conf.j2 @@ -0,0 +1,22 @@ +############################################################################### +# [ WARNING ] +# ceph configuration file maintained in aso +# local changes may be overwritten. +############################################################################### +[global] +{% if ceph.auth -%} +auth_supported = {{ ceph.auth }} +mon host = {{ ceph.mon_hosts }} +{% endif -%} +keyring = /etc/ceph/$cluster.$name.keyring +log to syslog = false +err to syslog = false +clog to syslog = false +{% if ceph.rbd_features %} +rbd default features = {{ ceph.rbd_features }} +{% endif %} + +[client] +{% if ceph_config.rbd_default_data_pool -%} +rbd default data pool = {{ ceph_config.rbd_default_data_pool }} +{% endif %} diff --git a/charms/heat-k8s/src/templates/heat.conf.j2 b/charms/heat-k8s/src/templates/heat.conf.j2 new file mode 100644 index 00000000..bcdc4eef --- /dev/null +++ b/charms/heat-k8s/src/templates/heat.conf.j2 @@ -0,0 +1,26 @@ +[DEFAULT] +debug = {{ options.debug }} + +instance_user=ec2-user +instance_driver=heat.engine.nova +plugin_dirs = /usr/lib64/heat,/usr/lib/heat +environment_dir=/etc/heat/environment.d +deferred_auth_method=password +host=heat +auth_encryption_key={{ encryption_key }} + +transport_url = {{ amqp.transport_url }} + +{% include "parts/section-identity" %} + +[database] +{% include "parts/database-connection" %} + +[paste_deploy] +api_paste_config=/etc/heat/api-paste.ini + +[heat_api] +bind_port=8004 + +[heat_api_cfn] +bind_port=8000 diff --git a/charms/heat-k8s/src/templates/parts/database-connection b/charms/heat-k8s/src/templates/parts/database-connection new file mode 100644 index 00000000..1fd70ce2 --- /dev/null +++ b/charms/heat-k8s/src/templates/parts/database-connection @@ -0,0 +1,3 @@ +{% if database.connection -%} +connection = {{ database.connection }} +{% endif -%} diff --git a/charms/heat-k8s/src/templates/parts/identity-data b/charms/heat-k8s/src/templates/parts/identity-data new file mode 100644 index 00000000..4b4af021 --- /dev/null +++ b/charms/heat-k8s/src/templates/parts/identity-data @@ -0,0 +1,10 @@ +{% if identity_service.internal_host -%} +www_authenticate_uri = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }} +auth_url = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }} +auth_type = password +project_domain_name = {{ identity_service.service_domain_name }} +user_domain_name = {{ identity_service.service_domain_name }} +project_name = {{ identity_service.service_project_name }} +username = {{ identity_service.service_user_name }} +password = {{ identity_service.service_password }} +{% endif -%} diff --git a/charms/heat-k8s/src/templates/parts/section-database b/charms/heat-k8s/src/templates/parts/section-database new file mode 100644 index 00000000..986d9b10 --- /dev/null +++ b/charms/heat-k8s/src/templates/parts/section-database @@ -0,0 +1,3 @@ +[database] +{% include "parts/database-connection" %} +connection_recycle_time = 200 diff --git 
a/charms/heat-k8s/src/templates/parts/section-federation b/charms/heat-k8s/src/templates/parts/section-federation
new file mode 100644
index 00000000..65ee99ed
--- /dev/null
+++ b/charms/heat-k8s/src/templates/parts/section-federation
@@ -0,0 +1,10 @@
+{% if trusted_dashboards %}
+[federation]
+{% for dashboard_url in trusted_dashboards -%}
+trusted_dashboard = {{ dashboard_url }}
+{% endfor -%}
+{% endif %}
+{% for sp in fid_sps -%}
+[{{ sp['protocol-name'] }}]
+remote_id_attribute = {{ sp['remote-id-attribute'] }}
+{% endfor -%}
diff --git a/charms/heat-k8s/src/templates/parts/section-identity b/charms/heat-k8s/src/templates/parts/section-identity
new file mode 100644
index 00000000..7568a9a4
--- /dev/null
+++ b/charms/heat-k8s/src/templates/parts/section-identity
@@ -0,0 +1,2 @@
+[keystone_authtoken]
+{% include "parts/identity-data" %}
diff --git a/charms/heat-k8s/src/templates/parts/section-middleware b/charms/heat-k8s/src/templates/parts/section-middleware
new file mode 100644
index 00000000..e65f1d98
--- /dev/null
+++ b/charms/heat-k8s/src/templates/parts/section-middleware
@@ -0,0 +1,6 @@
+{% for section in sections -%}
+[{{section}}]
+{% for key, value in sections[section].items() -%}
+{{ key }} = {{ value }}
+{% endfor %}
+{%- endfor %}
diff --git a/charms/heat-k8s/src/templates/parts/section-signing b/charms/heat-k8s/src/templates/parts/section-signing
new file mode 100644
index 00000000..cb7d69ae
--- /dev/null
+++ b/charms/heat-k8s/src/templates/parts/section-signing
@@ -0,0 +1,15 @@
+{% if enable_signing -%}
+[signing]
+{% if certfile -%}
+certfile = {{ certfile }}
+{% endif -%}
+{% if keyfile -%}
+keyfile = {{ keyfile }}
+{% endif -%}
+{% if ca_certs -%}
+ca_certs = {{ ca_certs }}
+{% endif -%}
+{% if ca_key -%}
+ca_key = {{ ca_key }}
+{% endif -%}
+{% endif -%}
\ No newline at end of file
diff --git a/charms/heat-k8s/src/templates/wsgi-heat-api.conf b/charms/heat-k8s/src/templates/wsgi-heat-api.conf
new file mode 100644
index 00000000..b34c076e
--- /dev/null
+++ b/charms/heat-k8s/src/templates/wsgi-heat-api.conf
@@ -0,0 +1,27 @@
+Listen {{ wsgi_config.public_port }}
+<VirtualHost *:{{ wsgi_config.public_port }}>
+    WSGIDaemonProcess {{ wsgi_config.group }} processes=3 threads=1 user={{ wsgi_config.user }} group={{ wsgi_config.group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ wsgi_config.group }}
+    {% if ingress_internal.ingress_path -%}
+    WSGIScriptAlias {{ ingress_internal.ingress_path }} {{ wsgi_config.wsgi_public_script }}
+    {% endif -%}
+    WSGIScriptAlias / {{ wsgi_config.wsgi_public_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+        ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog {{ wsgi_config.error_log }}
+    CustomLog {{ wsgi_config.custom_log }} combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
diff --git a/charms/heat-k8s/src/templates/wsgi-template.conf.j2 b/charms/heat-k8s/src/templates/wsgi-template.conf.j2
new file mode 100644
index 00000000..b34c076e
--- /dev/null
+++ b/charms/heat-k8s/src/templates/wsgi-template.conf.j2
@@ -0,0 +1,27 @@
+Listen {{ wsgi_config.public_port }}
+<VirtualHost *:{{ wsgi_config.public_port }}>
+    WSGIDaemonProcess {{ wsgi_config.group }} processes=3 threads=1 user={{ wsgi_config.user }} group={{ wsgi_config.group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup {{ wsgi_config.group }}
+    {% if ingress_internal.ingress_path -%}
+    WSGIScriptAlias {{ ingress_internal.ingress_path }} {{ wsgi_config.wsgi_public_script }}
+    {% endif -%}
+    WSGIScriptAlias / {{ wsgi_config.wsgi_public_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+        ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog {{ wsgi_config.error_log }}
+    CustomLog {{ wsgi_config.custom_log }} combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
diff --git a/charms/heat-k8s/test-requirements.txt b/charms/heat-k8s/test-requirements.txt
new file mode 100644
index 00000000..8057d2c6
--- /dev/null
+++ b/charms/heat-k8s/test-requirements.txt
@@ -0,0 +1,17 @@
+# This file is managed centrally. If you find the need to modify this as a
+# one-off, please don't. Instead, consult #openstack-charms and ask about
+# requirements management in charms via bot-control. Thank you.
+charm-tools>=2.4.4
+coverage>=3.6
+mock>=1.2
+flake8>=2.2.4,<=2.4.1
+pyflakes==2.1.1
+stestr>=2.2.0
+requests>=2.18.4
+psutil
+# oslo.i18n dropped py35 support
+oslo.i18n<4.0.0
+git+https://github.com/openstack-charmers/zaza.git#egg=zaza
+git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
+pytz  # workaround for 14.04 pip/tox
+pyudev  # for ceph-* charm unit tests (not mocked?)
diff --git a/charms/heat-k8s/tests/integration/test_charm.py b/charms/heat-k8s/tests/integration/test_charm.py
new file mode 100644
index 00000000..7a762d6a
--- /dev/null
+++ b/charms/heat-k8s/tests/integration/test_charm.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+# Copyright 2023 Felipe Reyes
+# See LICENSE file for licensing details.
+
+import asyncio
+import logging
+from pathlib import Path
+
+import pytest
+import yaml
+from pytest_operator.plugin import OpsTest
+
+logger = logging.getLogger(__name__)
+
+METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
+APP_NAME = METADATA["name"]
+
+
+@pytest.mark.abort_on_fail
+async def test_build_and_deploy(ops_test: OpsTest):
+    """Build the charm-under-test and deploy it together with related charms.
+
+    Assert on the unit status before any relations/configurations take place.
+    """
+    # Build and deploy charm from local source folder, passing every OCI image
+    # resource declared in metadata.yaml (heat-api-image, heat-api-cfn-image).
+    charm = await ops_test.build_charm(".")
+    resources = {name: res["upstream-source"] for name, res in METADATA["resources"].items()}
+
+    # Deploy the charm and wait for active/idle status
+    await asyncio.gather(
+        ops_test.model.deploy(charm, resources=resources, application_name=APP_NAME),
+        ops_test.model.wait_for_idle(
+            apps=[APP_NAME], status="active", raise_on_blocked=True, timeout=1000
+        ),
+    )
diff --git a/charms/heat-k8s/tests/unit/test_charm.py b/charms/heat-k8s/tests/unit/test_charm.py
new file mode 100644
index 00000000..fe9ceb09
--- /dev/null
+++ b/charms/heat-k8s/tests/unit/test_charm.py
@@ -0,0 +1,74 @@
+# Copyright 2023 Felipe Reyes
+# See LICENSE file for licensing details.
+#
+# Learn more about testing at: https://juju.is/docs/sdk/testing
+
+import unittest
+
+import ops.testing
+from charm import HeatK8SCharm
+from ops.model import ActiveStatus, BlockedStatus, WaitingStatus
+from ops.testing import Harness
+
+
+class TestCharm(unittest.TestCase):
+    def setUp(self):
+        # Enable more accurate simulation of container networking.
+ # For more information, see https://juju.is/docs/sdk/testing#heading--simulate-can-connect + ops.testing.SIMULATE_CAN_CONNECT = True + self.addCleanup(setattr, ops.testing, "SIMULATE_CAN_CONNECT", False) + + self.harness = Harness(HeatK8SCharm) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + + def test_httpbin_pebble_ready(self): + # Expected plan after Pebble ready with default config + expected_plan = { + "services": { + "httpbin": { + "override": "replace", + "summary": "httpbin", + "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", + "startup": "enabled", + "environment": {"GUNICORN_CMD_ARGS": "--log-level info"}, + } + }, + } + # Simulate the container coming up and emission of pebble-ready event + self.harness.container_pebble_ready("httpbin") + # Get the plan now we've run PebbleReady + updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() + # Check we've got the plan we expected + self.assertEqual(expected_plan, updated_plan) + # Check the service was started + service = self.harness.model.unit.get_container("httpbin").get_service("httpbin") + self.assertTrue(service.is_running()) + # Ensure we set an ActiveStatus with no message + self.assertEqual(self.harness.model.unit.status, ActiveStatus()) + + def test_config_changed_valid_can_connect(self): + # Ensure the simulated Pebble API is reachable + self.harness.set_can_connect("httpbin", True) + # Trigger a config-changed event with an updated value + self.harness.update_config({"log-level": "debug"}) + # Get the plan now we've run PebbleReady + updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() + updated_env = updated_plan["services"]["httpbin"]["environment"] + # Check the config change was effective + self.assertEqual(updated_env, {"GUNICORN_CMD_ARGS": "--log-level debug"}) + self.assertEqual(self.harness.model.unit.status, ActiveStatus()) + + def test_config_changed_valid_cannot_connect(self): + # Trigger a config-changed event with an updated value + self.harness.update_config({"log-level": "debug"}) + # Check the charm is in WaitingStatus + self.assertIsInstance(self.harness.model.unit.status, WaitingStatus) + + def test_config_changed_invalid(self): + # Ensure the simulated Pebble API is reachable + self.harness.set_can_connect("httpbin", True) + # Trigger a config-changed event with an updated value + self.harness.update_config({"log-level": "foobar"}) + # Check the charm is in BlockedStatus + self.assertIsInstance(self.harness.model.unit.status, BlockedStatus) diff --git a/charms/heat-k8s/tox.ini b/charms/heat-k8s/tox.ini new file mode 100644 index 00000000..31301b80 --- /dev/null +++ b/charms/heat-k8s/tox.ini @@ -0,0 +1,134 @@ +# Operator charm (with zaza): tox.ini + +[tox] +envlist = pep8,py3 +skipsdist = True +# NOTE: Avoid build/test env pollution by not enabling sitepackages. +sitepackages = False +# NOTE: Avoid false positives by not skipping missing interpreters. 
+skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = pip < 20.3 + virtualenv < 20.0 +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.2.0 + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + CHARM_DIR={envdir} +install_command = + pip install {opts} {packages} +commands = stestr run --slowest {posargs} +whitelist_externals = + git + add-to-archive.py + bash + charmcraft +passenv = HOME TERM CS_* OS_* TEST_* +deps = -r{toxinidir}/test-requirements.txt + +[testenv:py35] +basepython = python3.5 +# python3.5 is irrelevant on a focal+ charm. +commands = /bin/true + +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:pep8] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} src unit_tests tests + +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run --slowest {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . +omit = + .tox/* + */charmhelpers/* + unit_tests/* + +[testenv:venv] +basepython = python3 +commands = {posargs} + +[testenv:build] +basepython = python3 +deps = -r{toxinidir}/build-requirements.txt +commands = + charmcraft build + +[testenv:func-noop] +basepython = python3 +commands = + functest-run-suite --help + +[testenv:func] +basepython = python3 +commands = + functest-run-suite --keep-model + +[testenv:func-smoke] +basepython = python3 +commands = + functest-run-suite --keep-model --smoke + +[testenv:func-dev] +basepython = python3 +commands = + functest-run-suite --keep-model --dev + +[testenv:func-target] +basepython = python3 +commands = + functest-run-suite --keep-model --bundle {posargs} + +[flake8] +# Ignore E902 because the unit_tests directory is missing in the built charm. +ignore = E402,E226,E902
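
The bundled unit tests still exercise the `httpbin` workload from the charmcraft template rather than `HeatOperatorCharm`. As a rough, heat-specific sketch only, a minimal replacement test could look like the code below; it assumes `ops.testing.Harness` can resolve the charm's `metadata.yaml` from the repository layout and that the `OSBaseOperatorAPICharm` base class initialises cleanly with no relations established. The asserted values are taken from `src/charm.py`.

```python
# A minimal sketch (not part of the patch above): unit-test skeleton for HeatOperatorCharm.
import unittest

from ops.testing import Harness

from charm import HeatOperatorCharm


class TestHeatOperatorCharm(unittest.TestCase):
    def setUp(self):
        # Harness loads metadata.yaml/config.yaml relative to src/charm.py.
        self.harness = Harness(HeatOperatorCharm)
        self.addCleanup(self.harness.cleanup)
        self.harness.begin()

    def test_charm_defaults(self):
        # Values defined on HeatOperatorCharm in src/charm.py.
        self.assertEqual(self.harness.charm.service_name, "heat-api")
        self.assertEqual(self.harness.charm.default_public_ingress_port, 8004)
        self.assertEqual(self.harness.charm.service_conf, "/etc/heat/heat.conf")


if __name__ == "__main__":
    unittest.main()
```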