Remove deprecated kubernetes_service_patch library

Remove deprecated kubernetes_service_patch library.
Add new class in ops_sunbeam to manage new kubernetes
service of type LoadBalancer.
The service name should be <app_name>-lb.
Use the class in ovn-relay-k8s and designate-bind-k8s
to create new service of type LoadBalancer.
Update open ports in default service definition for
ovn-relay-k8s and designate-bind-k8s.

Remove library kubernetes_service_patch from external
libs and remove any references.

Change-Id: Ic62af0aeaf9f5502d5d7792ed0b182b9a83c2b72
This commit is contained in:
Hemanth Nakkina 2025-02-05 15:49:48 +05:30
parent f218d87642
commit 27b06dbb54
No known key found for this signature in database
GPG Key ID: 2E4970F7B143168E
16 changed files with 179 additions and 370 deletions

View File

@ -1,5 +1,4 @@
external-libraries:
- charms.observability_libs.v1.kubernetes_service_patch
- charms.loki_k8s.v1.loki_push_api
- charms.tempo_k8s.v2.tracing
- charms.tempo_k8s.v1.charm_tracing

View File

@ -2,6 +2,7 @@ ops
jinja2
lightkube
lightkube-models
lightkube-extensions @ git+https://github.com/canonical/lightkube-extensions.git@main
# From ops_sunbeam
tenacity

View File

@ -33,7 +33,6 @@ from typing import (
)
import charms.designate_bind_k8s.v0.bind_rndc as bind_rndc
import charms.observability_libs.v1.kubernetes_service_patch as kubernetes_service_patch
import lightkube.models.core_v1 as core_v1
import ops
import ops.charm
@ -45,6 +44,9 @@ import ops_sunbeam.tracing as sunbeam_tracing
from ops.framework import (
StoredState,
)
from ops_sunbeam.k8s_resource_handlers import (
KubernetesLoadBalancerHandler,
)
logger = logging.getLogger(__name__)
@ -201,18 +203,21 @@ class BindOperatorCharm(sunbeam_charm.OSBaseOperatorCharmK8S):
def __init__(self, *args):
super().__init__(*args)
self.framework.observe(self.on.secret_rotate, self._on_secret_rotate)
self.service = kubernetes_service_patch.KubernetesServicePatch(
service_ports = [
core_v1.ServicePort(
53, appProtocol="domain", name="bind", protocol="UDP"
),
core_v1.ServicePort(
953, appProtocol="rndc", name="rndc", protocol="TCP"
),
]
self.lb_handler = KubernetesLoadBalancerHandler(
self,
ports=[
core_v1.ServicePort(
53, appProtocol="domain", name="bind", protocol="UDP"
),
core_v1.ServicePort(
953, appProtocol="rndc", name="rndc", protocol="TCP"
),
],
service_type="LoadBalancer",
service_ports,
refresh_event=[self.on.install],
)
self.unit.set_ports(53, 953)
def _on_secret_rotate(self, event: ops.SecretRotateEvent):
"""Handle secret rotate event."""

View File

@ -41,9 +41,7 @@ class _BindTestOperatorCharm(charm.BindOperatorCharm):
class TestBindOperatorCharm(test_utils.CharmTestCase):
"""Test charm."""
PATCHES = [
"kubernetes_service_patch",
]
PATCHES = []
def setUp(self):
"""Test setup."""

View File

@ -1,7 +1,6 @@
external-libraries:
- charms.data_platform_libs.v0.data_interfaces
- charms.grafana_agent.v0.cos_agent
- charms.observability_libs.v1.kubernetes_service_patch
- charms.operator_libs_linux.v2.snap
- charms.rabbitmq_k8s.v0.rabbitmq
- charms.traefik_k8s.v2.ingress

View File

@ -1,6 +1,5 @@
external-libraries:
- charms.tls_certificates_interface.v3.tls_certificates
- charms.observability_libs.v1.kubernetes_service_patch
- charms.loki_k8s.v1.loki_push_api
- charms.tempo_k8s.v2.tracing
- charms.tempo_k8s.v1.charm_tracing

View File

@ -9,6 +9,7 @@ jinja2
jsonschema
lightkube
lightkube-models
lightkube-extensions @ git+https://github.com/canonical/lightkube-extensions.git@main
ops
# From ops_sunbeam

View File

@ -44,12 +44,12 @@ import ops_sunbeam.ovn.container_handlers as ovn_chandlers
import ops_sunbeam.ovn.relation_handlers as ovn_relation_handlers
import ops_sunbeam.relation_handlers as sunbeam_rhandlers
import ops_sunbeam.tracing as sunbeam_tracing
from charms.observability_libs.v1.kubernetes_service_patch import (
KubernetesServicePatch,
)
from lightkube.models.core_v1 import (
ServicePort,
)
from ops_sunbeam.k8s_resource_handlers import (
KubernetesLoadBalancerHandler,
)
logger = logging.getLogger(__name__)
@ -95,11 +95,15 @@ class OVNRelayOperatorCharm(ovn_charm.OSBaseOVNOperatorCharm):
def __init__(self, framework):
super().__init__(framework)
self.service_patcher = KubernetesServicePatch(
service_ports = [ServicePort(6642, name="southbound")]
self.lb_handler = KubernetesLoadBalancerHandler(
self,
[ServicePort(6642, name="southbound")],
service_type="LoadBalancer",
service_ports,
refresh_event=[self.on.install],
)
self.unit.set_ports(6642)
self.framework.observe(
self.on.get_southbound_db_url_action,
self._get_southbound_db_url_action,

View File

@ -36,9 +36,7 @@ class _OVNRelayOperatorCharm(charm.OVNRelayOperatorCharm):
class TestOVNRelayOperatorCharm(test_utils.CharmTestCase):
"""Test OVN relay."""
PATCHES = [
"KubernetesServicePatch",
]
PATCHES = []
def setUp(self):
"""Setup OVN relay tests."""

View File

@ -1,5 +1,4 @@
external-libraries:
- charms.observability_libs.v1.kubernetes_service_patch
- charms.grafana_k8s.v0.grafana_dashboard
- charms.loki_k8s.v1.loki_push_api
- charms.certificate_transfer_interface.v0.certificate_transfer

View File

@ -1,341 +0,0 @@
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
"""# KubernetesServicePatch Library.
This library is designed to enable developers to more simply patch the Kubernetes Service created
by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a
service named after the application in the namespace (named after the Juju model). This service by
default contains a "placeholder" port, which is 65536/TCP.
When modifying the default set of resources managed by Juju, one must consider the lifecycle of the
charm. In this case, any modifications to the default service (created during deployment), will be
overwritten during a charm upgrade.
When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm`
events which applies the patch to the cluster. This should ensure that the service ports are
correct throughout the charm's life.
The constructor simply takes a reference to the parent charm, and a list of
[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the
service. For information regarding the `lightkube` `ServicePort` model, please visit the
`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport).
Optionally, a name of the service (in case service name needs to be patched as well), labels,
selectors, and annotations can be provided as keyword arguments.
## Getting Started
To get started using the library, you just need to fetch the library using `charmcraft`. **Note
that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.**
```shell
cd some-charm
charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch
cat << EOF >> requirements.txt
lightkube
lightkube-models
EOF
```
Then, to initialise the library:
For `ClusterIP` services:
```python
# ...
from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
from lightkube.models.core_v1 import ServicePort
class SomeCharm(CharmBase):
def __init__(self, *args):
# ...
port = ServicePort(443, name=f"{self.app.name}")
self.service_patcher = KubernetesServicePatch(self, [port])
# ...
```
For `LoadBalancer`/`NodePort` services:
```python
# ...
from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
from lightkube.models.core_v1 import ServicePort
class SomeCharm(CharmBase):
def __init__(self, *args):
# ...
port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666)
self.service_patcher = KubernetesServicePatch(
self, [port], "LoadBalancer"
)
# ...
```
Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"`
```python
# ...
from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
from lightkube.models.core_v1 import ServicePort
class SomeCharm(CharmBase):
def __init__(self, *args):
# ...
tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP")
udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP")
sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP")
self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp])
# ...
```
Bound with custom events by providing `refresh_event` argument:
For example, you would like to have a configurable port in your charm and want to apply
service patch every time charm config is changed.
```python
from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
from lightkube.models.core_v1 import ServicePort
class SomeCharm(CharmBase):
def __init__(self, *args):
# ...
port = ServicePort(int(self.config["charm-config-port"]), name=f"{self.app.name}")
self.service_patcher = KubernetesServicePatch(
self,
[port],
refresh_event=self.on.config_changed
)
# ...
```
Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library
does not try to make any API calls, or open any files during testing that are unlikely to be
present, and could break your tests. The easiest way to do this is during your test `setUp`:
```python
# ...
@patch("charm.KubernetesServicePatch", lambda x, y: None)
def setUp(self, *unused):
self.harness = Harness(SomeCharm)
# ...
```
"""
import logging
from types import MethodType
from typing import List, Literal, Optional, Union
from lightkube import ApiError, Client # pyright: ignore
from lightkube.core import exceptions
from lightkube.models.core_v1 import ServicePort, ServiceSpec
from lightkube.models.meta_v1 import ObjectMeta
from lightkube.resources.core_v1 import Service
from lightkube.types import PatchType
from ops.charm import CharmBase
from ops.framework import BoundEvent, Object
logger = logging.getLogger(__name__)
# The unique Charmhub library identifier, never change it
LIBID = "0042f86d0a874435adef581806cddbbb"
# Increment this major API version when introducing breaking changes
LIBAPI = 1
# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 9
ServiceType = Literal["ClusterIP", "LoadBalancer"]
class KubernetesServicePatch(Object):
    """A utility for patching the Kubernetes service set up by Juju.

    NOTE(review): this commit removes this deprecated library; documentation
    retained here only for reference while the removal lands.
    """

    def __init__(
        self,
        charm: CharmBase,
        ports: List[ServicePort],
        service_name: Optional[str] = None,
        service_type: ServiceType = "ClusterIP",
        additional_labels: Optional[dict] = None,
        additional_selectors: Optional[dict] = None,
        additional_annotations: Optional[dict] = None,
        *,
        refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None,
    ):
        """Constructor for KubernetesServicePatch.

        Args:
            charm: the charm that is instantiating the library.
            ports: a list of ServicePorts
            service_name: allows setting custom name to the patched service. If none given,
                application name will be used.
            service_type: desired type of K8s service. Default value is in line with ServiceSpec's
                default value.
            additional_labels: Labels to be added to the kubernetes service (by default only
                "app.kubernetes.io/name" is set to the service name)
            additional_selectors: Selectors to be added to the kubernetes service (by default only
                "app.kubernetes.io/name" is set to the service name)
            additional_annotations: Annotations to be added to the kubernetes service.
            refresh_event: an optional bound event or list of bound events which
                will be observed to re-apply the patch (e.g. on port change).
                The `install` and `upgrade-charm` events would be observed regardless.
        """
        super().__init__(charm, "kubernetes-service-patch")
        self.charm = charm
        # Fall back to the Juju application name when no explicit service name is given.
        self.service_name = service_name if service_name else self._app
        # Build the desired Service object once; _patch re-applies it on each event.
        self.service = self._service_object(
            ports,
            service_name,
            service_type,
            additional_labels,
            additional_selectors,
            additional_annotations,
        )

        # Make mypy type checking happy that self._patch is a method
        assert isinstance(self._patch, MethodType)
        # Ensure this patch is applied during the 'install' and 'upgrade-charm' events
        self.framework.observe(charm.on.install, self._patch)
        self.framework.observe(charm.on.upgrade_charm, self._patch)
        self.framework.observe(charm.on.update_status, self._patch)

        # apply user defined events
        if refresh_event:
            # A single BoundEvent is accepted as a convenience; normalise to a list.
            if not isinstance(refresh_event, list):
                refresh_event = [refresh_event]

            for evt in refresh_event:
                self.framework.observe(evt, self._patch)

    def _service_object(
        self,
        ports: List[ServicePort],
        service_name: Optional[str] = None,
        service_type: ServiceType = "ClusterIP",
        additional_labels: Optional[dict] = None,
        additional_selectors: Optional[dict] = None,
        additional_annotations: Optional[dict] = None,
    ) -> Service:
        """Creates a valid Service representation.

        Args:
            ports: a list of ServicePorts
            service_name: allows setting custom name to the patched service. If none given,
                application name will be used.
            service_type: desired type of K8s service. Default value is in line with ServiceSpec's
                default value.
            additional_labels: Labels to be added to the kubernetes service (by default only
                "app.kubernetes.io/name" is set to the service name)
            additional_selectors: Selectors to be added to the kubernetes service (by default only
                "app.kubernetes.io/name" is set to the service name)
            additional_annotations: Annotations to be added to the kubernetes service.

        Returns:
            Service: A valid representation of a Kubernetes Service with the correct ports.
        """
        if not service_name:
            service_name = self._app
        # Base label/selector always identifies the app; extras are merged on top.
        labels = {"app.kubernetes.io/name": self._app}
        if additional_labels:
            labels.update(additional_labels)
        selector = {"app.kubernetes.io/name": self._app}
        if additional_selectors:
            selector.update(additional_selectors)
        return Service(
            apiVersion="v1",
            kind="Service",
            metadata=ObjectMeta(
                namespace=self._namespace,
                name=service_name,
                labels=labels,
                annotations=additional_annotations,  # type: ignore[arg-type]
            ),
            spec=ServiceSpec(
                selector=selector,
                ports=ports,
                type=service_type,
            ),
        )

    def _patch(self, _) -> None:
        """Patch the Kubernetes service created by Juju to map the correct port.

        Raises:
            PatchFailed: if patching fails due to lack of permissions, or otherwise.
        """
        try:
            client = Client()  # pyright: ignore
        except exceptions.ConfigError as e:
            # No usable kubeconfig / in-cluster config: warn and bail out rather than crash.
            logger.warning("Error creating k8s client: %s", e)
            return

        try:
            # Skip the API round-trip when the desired ports are already in place.
            if self._is_patched(client):
                return
            if self.service_name != self._app:
                # Custom service name requested: the default Juju service must be
                # renamed first (delete + recreate, since rename is not supported).
                self._delete_and_create_service(client)
            client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE)
        except ApiError as e:
            if e.status.code == 403:
                # RBAC failure: the charm needs cluster permissions via `juju trust`.
                logger.error("Kubernetes service patch failed: `juju trust` this application.")
            else:
                logger.error("Kubernetes service patch failed: %s", str(e))
        else:
            logger.info("Kubernetes service '%s' patched successfully", self._app)

    def _delete_and_create_service(self, client: Client):
        """Rename the default Juju service by deleting and recreating it."""
        service = client.get(Service, self._app, namespace=self._namespace)
        service.metadata.name = self.service_name  # type: ignore[attr-defined]
        # Clear server-assigned identity fields so the object can be re-created.
        service.metadata.resourceVersion = service.metadata.uid = None  # type: ignore[attr-defined]  # noqa: E501
        client.delete(Service, self._app, namespace=self._namespace)
        client.create(service)

    def is_patched(self) -> bool:
        """Reports if the service patch has been applied.

        Returns:
            bool: A boolean indicating if the service patch has been applied.
        """
        client = Client()  # pyright: ignore
        return self._is_patched(client)

    def _is_patched(self, client: Client) -> bool:
        # Get the relevant service from the cluster
        try:
            service = client.get(Service, name=self.service_name, namespace=self._namespace)
        except ApiError as e:
            if e.status.code == 404 and self.service_name != self._app:
                # Renamed service has not been created yet: definitely not patched.
                return False
            logger.error("Kubernetes service get failed: %s", str(e))
            raise

        # Construct a list of expected ports, should the patch be applied
        expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]  # type: ignore[attr-defined]
        # Construct a list in the same manner, using the fetched service
        fetched_ports = [
            (p.port, p.targetPort) for p in service.spec.ports  # type: ignore[attr-defined]
        ]  # noqa: E501
        return expected_ports == fetched_ports

    @property
    def _app(self) -> str:
        """Name of the current Juju application.

        Returns:
            str: A string containing the name of the current Juju application.
        """
        return self.charm.app.name

    @property
    def _namespace(self) -> str:
        """The Kubernetes namespace we're running in.

        Returns:
            str: A string containing the name of the current Kubernetes namespace.
        """
        # The model name equals the namespace; read it from the in-pod
        # service-account mount rather than depending on charm state.
        with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
            return f.read().strip()

View File

@ -0,0 +1,145 @@
# Copyright 2025 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles management of kubernetes resources."""
import logging
import ops_sunbeam.tracing as sunbeam_tracing
from lightkube.core.client import (
Client,
)
from lightkube.models.core_v1 import (
ServicePort,
ServiceSpec,
)
from lightkube.models.meta_v1 import (
ObjectMeta,
)
from lightkube.resources.core_v1 import (
Service,
)
from lightkube_extensions.batch import ( # type: ignore[import-untyped, import-not-found]
KubernetesResourceManager,
create_charm_default_labels,
)
from ops.framework import (
BoundEvent,
Object,
)
from ops_sunbeam.charm import (
OSBaseOperatorCharmK8S,
)
logger = logging.getLogger(__name__)
@sunbeam_tracing.trace_type
class KubernetesLoadBalancerHandler(Object):
    """Manage Kubernetes LB services.

    Creates a new Kubernetes service of type LoadBalancer
    with name as {app.name}-lb. Patch the service on
    events defined by the charm.
    Remove the kubernetes service on removal of application
    or the last unit.
    """

    def __init__(
        self,
        charm: OSBaseOperatorCharmK8S,
        service_ports: list[ServicePort],
        # FIX: the original annotation (`list[BoundEvent] | None`) contradicted the
        # isinstance() coercion below, which deliberately accepts a bare BoundEvent.
        refresh_event: list[BoundEvent] | BoundEvent | None = None,
    ):
        """Create the handler and wire it to charm events.

        Args:
            charm: the charm instantiating this handler.
            service_ports: ports to expose on the LoadBalancer service.
            refresh_event: optional bound event (or list of bound events) on
                which the service is (re)applied, e.g. `charm.on.install`.
        """
        super().__init__(charm, "kubernetes-lb-handler")
        self.charm = charm
        self._service_ports = service_ports
        # Label scope and service name are both "<app>-lb" by design (see class doc).
        self._lb_label = f"{self.charm.app.name}-lb"
        self._lightkube_client = None
        self._lightkube_field_manager: str = self.charm.app.name
        self._lb_name: str = f"{self.charm.app.name}-lb"

        # apply user defined events
        if refresh_event:
            if not isinstance(refresh_event, list):
                refresh_event = [refresh_event]
            for evt in refresh_event:
                self.framework.observe(evt, self._reconcile_lb)

        # Remove service if the last unit is removed
        self.framework.observe(charm.on.remove, self._on_remove)

    @property
    def lightkube_client(self) -> Client:
        """Returns a lightkube client configured for this charm."""
        # Lazily constructed so unit tests that never touch k8s do not need a client.
        if self._lightkube_client is None:
            self._lightkube_client = Client(
                namespace=self.charm.model.name,
                field_manager=self._lightkube_field_manager,
            )
        return self._lightkube_client

    def _get_lb_resource_manager(self) -> KubernetesResourceManager:
        """Build a resource manager scoped to this charm's LB service label."""
        return KubernetesResourceManager(
            labels=create_charm_default_labels(
                self.charm.app.name,
                self.charm.model.name,
                scope=self._lb_label,
            ),
            resource_types={Service},
            lightkube_client=self.lightkube_client,
            logger=logger,
        )

    def _construct_lb(self) -> Service:
        """Build the desired LoadBalancer Service object for this app."""
        return Service(
            metadata=ObjectMeta(
                name=f"{self._lb_name}",
                namespace=self.charm.model.name,
                labels={"app.kubernetes.io/name": self.charm.app.name},
            ),
            spec=ServiceSpec(
                ports=self._service_ports,
                # Selector matches the pods Juju labels for this application.
                selector={"app.kubernetes.io/name": self.charm.app.name},
                type="LoadBalancer",
            ),
        )

    def _reconcile_lb(self, _) -> None:
        """Reconcile the LoadBalancer's state."""
        # Only the leader mutates cluster state to avoid concurrent writes.
        if not self.charm.unit.is_leader():
            return
        klm = self._get_lb_resource_manager()
        resources_list = [self._construct_lb()]
        # Lazy %-style args keep formatting off the hot path and match logging best practice.
        logger.info(
            "Patching k8s loadbalancer service object %s", self._lb_name
        )
        klm.reconcile(resources_list)

    def _on_remove(self, _) -> None:
        """Delete the LB service when the application is being removed."""
        if not self.charm.unit.is_leader():
            return
        # juju scale down on kubernetes charms removes non-leader units.
        # So removal of leader unit can be considered as application is
        # getting destroyed or all the units are removed. Remove the
        # service in this case.
        logger.info(
            "Removing k8s loadbalancer service object %s", self._lb_name
        )
        klm = self._get_lb_resource_manager()
        klm.delete()

View File

@ -23,3 +23,5 @@ types-PyYAML # types stub for mypy
# due to https://github.com/corpusops/croniter/issues/144
# we might need to find an alternative for croniter project
croniter==5.0.1 # tempest-k8s
lightkube-extensions @ git+https://github.com/canonical/lightkube-extensions.git@main # ovn-relay-k8s,designate-bind-k8s

View File

@ -331,7 +331,7 @@ applications:
{% endif -%}
base: ubuntu@24.04
scale: 1
trust: false
trust: true
resources:
designate-bind-image: ubuntu/bind9:9.18-22.04_beta
designate:

View File

@ -55,7 +55,7 @@ applications:
{% endif -%}
base: ubuntu@24.04
scale: 1
trust: false
trust: true
resources:
designate-bind-image: ubuntu/bind9:9.18-22.04_beta
designate: