\n\n\n",
- "mode": "html"
- },
- "pluginVersion": "7.4.3",
- "targets": [
- {
- "expr": "",
- "instant": true,
- "refId": "A"
- }
- ],
- "timeFrom": null,
- "timeShift": null,
- "title": "",
- "transparent": true,
- "type": "text"
- }
- ],
- "refresh": "30s",
- "schemaVersion": 27,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": [
- {
- "current": {
- "selected": false,
- "text": "No data sources found",
- "value": ""
- },
- "description": null,
- "error": null,
- "hide": 2,
- "includeAll": false,
- "label": "",
- "multi": false,
- "name": "datasource",
- "options": [],
- "query": "prometheus",
- "refresh": 1,
- "regex": "/$ds/",
- "skipUrlSync": false,
- "type": "datasource"
- }
- ]
- },
- "time": {
- "from": "now-15m",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "browser",
- "title": "OSM Status Summary",
- "uid": "4XuPd2Ii1",
- "version": 12
-}
\ No newline at end of file
diff --git a/installers/charm/grafana/tests/__init__.py b/installers/charm/grafana/tests/__init__.py
deleted file mode 100644
index 446d5cee..00000000
--- a/installers/charm/grafana/tests/__init__.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-"""Init mocking for unit tests."""
-
-import sys
-
-
-import mock
-
-
-class OCIImageResourceErrorMock(Exception):
- pass
-
-
-sys.path.append("src")
-
-oci_image = mock.MagicMock()
-oci_image.OCIImageResourceError = OCIImageResourceErrorMock
-sys.modules["oci_image"] = oci_image
-sys.modules["oci_image"].OCIImageResource().fetch.return_value = {}
diff --git a/installers/charm/grafana/tests/test_charm.py b/installers/charm/grafana/tests/test_charm.py
deleted file mode 100644
index 3bfd69c7..00000000
--- a/installers/charm/grafana/tests/test_charm.py
+++ /dev/null
@@ -1,703 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-import sys
-from typing import NoReturn
-import unittest
-from unittest.mock import patch
-
-from charm import GrafanaCharm
-from ops.model import ActiveStatus, BlockedStatus
-from ops.testing import Harness
-
-
-class TestCharm(unittest.TestCase):
-    """Grafana Charm unit tests."""
-
- def setUp(self) -> NoReturn:
- """Test setup"""
- self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
- self.harness = Harness(GrafanaCharm)
- self.harness.set_leader(is_leader=True)
- self.harness.begin()
- self.config = {
- "max_file_size": 0,
- "ingress_whitelist_source_range": "",
- "tls_secret_name": "",
- "site_url": "https://grafana.192.168.100.100.nip.io",
- "cluster_issuer": "vault-issuer",
- "osm_dashboards": True,
- }
- self.harness.update_config(self.config)
-
- def test_config_changed(
- self,
- ) -> NoReturn:
-        """Test config changed without the prometheus relation."""
-
- self.harness.charm.on.config_changed.emit()
-
- # Assertions
- self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
- self.assertTrue("prometheus" in self.harness.charm.unit.status.message)
-
- def test_config_changed_non_leader(
- self,
- ) -> NoReturn:
-        """Test config changed on a non-leader unit."""
- self.harness.set_leader(is_leader=False)
- self.harness.charm.on.config_changed.emit()
-
- # Assertions
- self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
-
- @patch("opslib.osm.interfaces.grafana.GrafanaCluster.set_initial_password")
- def test_with_db_relation_and_prometheus(self, _) -> NoReturn:
- self.initialize_prometheus_relation()
- self.initialize_mysql_relation()
- self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
-
- @patch("opslib.osm.interfaces.grafana.GrafanaCluster.set_initial_password")
- def test_with_db_config_and_prometheus(self, _) -> NoReturn:
- self.initialize_prometheus_relation()
- self.initialize_mysql_config()
- self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
-
- def test_with_prometheus(
- self,
- ) -> NoReturn:
-        """Test with only the prometheus relation."""
- self.initialize_prometheus_relation()
- # Verifying status
- self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
- def test_with_db_config(self) -> NoReturn:
- "Test with mysql config"
- self.initialize_mysql_config()
- # Verifying status
- self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
- @patch("opslib.osm.interfaces.grafana.GrafanaCluster.set_initial_password")
- def test_with_db_relations(self, _) -> NoReturn:
- "Test with relations"
- self.initialize_mysql_relation()
- # Verifying status
- self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
- def test_exception_db_relation_and_config(
- self,
- ) -> NoReturn:
- "Test with relations and config. Must throw exception"
- self.initialize_mysql_config()
- self.initialize_mysql_relation()
- # Verifying status
- self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
- def initialize_prometheus_relation(self):
- relation_id = self.harness.add_relation("prometheus", "prometheus")
- self.harness.add_relation_unit(relation_id, "prometheus/0")
- self.harness.update_relation_data(
- relation_id,
- "prometheus",
- {"hostname": "prometheus", "port": 9090},
- )
-
- def initialize_mysql_config(self):
- self.harness.update_config(
- {"mysql_uri": "mysql://grafana:$grafanapw$@host:3606/db"}
- )
-
- def initialize_mysql_relation(self):
- relation_id = self.harness.add_relation("db", "mysql")
- self.harness.add_relation_unit(relation_id, "mysql/0")
- self.harness.update_relation_data(
- relation_id,
- "mysql/0",
- {
- "host": "mysql",
- "port": 3306,
- "user": "mano",
- "password": "manopw",
- "root_password": "rootmanopw",
- },
- )
-
-
-if __name__ == "__main__":
- unittest.main()
-
-# class TestCharm(unittest.TestCase):
-# """Grafana Charm unit tests."""
-
-# def setUp(self) -> NoReturn:
-# """Test setup"""
-# self.harness = Harness(GrafanaCharm)
-# self.harness.set_leader(is_leader=True)
-# self.harness.begin()
-
-# def test_on_start_without_relations(self) -> NoReturn:
-# """Test installation without any relation."""
-# self.harness.charm.on.start.emit()
-
-# # Verifying status
-# self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-# # Verifying status message
-# self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-# self.assertTrue(
-# self.harness.charm.unit.status.message.startswith("Waiting for ")
-# )
-# self.assertIn("prometheus", self.harness.charm.unit.status.message)
-# self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
-
-# def test_on_start_with_relations_without_http(self) -> NoReturn:
-# """Test deployment."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "grafana",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "grafana",
-# "containerPort": 3000,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {},
-# "volumeConfig": [
-# {
-# "name": "dashboards",
-# "mountPath": "/etc/grafana/provisioning/dashboards/",
-# "files": [
-# {
-# "path": "dashboard-osm.yml",
-# "content": (
-# "apiVersion: 1\n"
-# "providers:\n"
-# " - name: 'osm'\n"
-# " orgId: 1\n"
-# " folder: ''\n"
-# " type: file\n"
-# " options:\n"
-# " path: /etc/grafana/provisioning/dashboards/\n"
-# ),
-# },
-# ],
-# },
-# {
-# "name": "datasources",
-# "mountPath": "/etc/grafana/provisioning/datasources/",
-# "files": [
-# {
-# "path": "datasource-prometheus.yml",
-# "content": (
-# "datasources:\n"
-# " - access: proxy\n"
-# " editable: true\n"
-# " is_default: true\n"
-# " name: osm_prometheus\n"
-# " orgId: 1\n"
-# " type: prometheus\n"
-# " version: 1\n"
-# " url: http://prometheus:9090\n"
-# ),
-# },
-# ],
-# },
-# ],
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 3000,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 3000,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {"ingressResources": []},
-# }
-
-# self.harness.charm.on.start.emit()
-
-# # Initializing the prometheus relation
-# relation_id = self.harness.add_relation("prometheus", "prometheus")
-# self.harness.add_relation_unit(relation_id, "prometheus/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "prometheus",
-# {
-# "hostname": "prometheus",
-# "port": "9090",
-# },
-# )
-
-# # Verifying status
-# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-# pod_spec, _ = self.harness.get_pod_spec()
-
-# self.assertDictEqual(expected_result, pod_spec)
-
-# def test_ingress_resources_with_http(self) -> NoReturn:
-# """Test ingress resources with HTTP."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "grafana",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "grafana",
-# "containerPort": 3000,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {},
-# "volumeConfig": [
-# {
-# "name": "dashboards",
-# "mountPath": "/etc/grafana/provisioning/dashboards/",
-# "files": [
-# {
-# "path": "dashboard-osm.yml",
-# "content": (
-# "apiVersion: 1\n"
-# "providers:\n"
-# " - name: 'osm'\n"
-# " orgId: 1\n"
-# " folder: ''\n"
-# " type: file\n"
-# " options:\n"
-# " path: /etc/grafana/provisioning/dashboards/\n"
-# ),
-# },
-# ],
-# },
-# {
-# "name": "datasources",
-# "mountPath": "/etc/grafana/provisioning/datasources/",
-# "files": [
-# {
-# "path": "datasource-prometheus.yml",
-# "content": (
-# "datasources:\n"
-# " - access: proxy\n"
-# " editable: true\n"
-# " is_default: true\n"
-# " name: osm_prometheus\n"
-# " orgId: 1\n"
-# " type: prometheus\n"
-# " version: 1\n"
-# " url: http://prometheus:9090\n"
-# ),
-# },
-# ],
-# },
-# ],
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 3000,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 3000,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {
-# "ingressResources": [
-# {
-# "name": "grafana-ingress",
-# "annotations": {
-# "nginx.ingress.kubernetes.io/proxy-body-size": "0",
-# "nginx.ingress.kubernetes.io/ssl-redirect": "false",
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": "grafana",
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": "grafana",
-# "servicePort": 3000,
-# },
-# }
-# ]
-# },
-# }
-# ]
-# },
-# }
-# ],
-# },
-# }
-
-# self.harness.charm.on.start.emit()
-
-# # Initializing the prometheus relation
-# relation_id = self.harness.add_relation("prometheus", "prometheus")
-# self.harness.add_relation_unit(relation_id, "prometheus/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "prometheus",
-# {
-# "hostname": "prometheus",
-# "port": "9090",
-# },
-# )
-
-# self.harness.update_config({"site_url": "http://grafana"})
-
-# pod_spec, _ = self.harness.get_pod_spec()
-
-# self.assertDictEqual(expected_result, pod_spec)
-
-# def test_ingress_resources_with_https(self) -> NoReturn:
-# """Test ingress resources with HTTPS."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "grafana",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "grafana",
-# "containerPort": 3000,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {},
-# "volumeConfig": [
-# {
-# "name": "dashboards",
-# "mountPath": "/etc/grafana/provisioning/dashboards/",
-# "files": [
-# {
-# "path": "dashboard-osm.yml",
-# "content": (
-# "apiVersion: 1\n"
-# "providers:\n"
-# " - name: 'osm'\n"
-# " orgId: 1\n"
-# " folder: ''\n"
-# " type: file\n"
-# " options:\n"
-# " path: /etc/grafana/provisioning/dashboards/\n"
-# ),
-# },
-# ],
-# },
-# {
-# "name": "datasources",
-# "mountPath": "/etc/grafana/provisioning/datasources/",
-# "files": [
-# {
-# "path": "datasource-prometheus.yml",
-# "content": (
-# "datasources:\n"
-# " - access: proxy\n"
-# " editable: true\n"
-# " is_default: true\n"
-# " name: osm_prometheus\n"
-# " orgId: 1\n"
-# " type: prometheus\n"
-# " version: 1\n"
-# " url: http://prometheus:9090\n"
-# ),
-# },
-# ],
-# },
-# ],
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 3000,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 3000,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {
-# "ingressResources": [
-# {
-# "name": "grafana-ingress",
-# "annotations": {
-# "nginx.ingress.kubernetes.io/proxy-body-size": "0",
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": "grafana",
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": "grafana",
-# "servicePort": 3000,
-# },
-# }
-# ]
-# },
-# }
-# ],
-# "tls": [{"hosts": ["grafana"], "secretName": "grafana"}],
-# },
-# }
-# ],
-# },
-# }
-
-# self.harness.charm.on.start.emit()
-
-# # Initializing the prometheus relation
-# relation_id = self.harness.add_relation("prometheus", "prometheus")
-# self.harness.add_relation_unit(relation_id, "prometheus/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "prometheus",
-# {
-# "hostname": "prometheus",
-# "port": "9090",
-# },
-# )
-
-# self.harness.update_config(
-# {"site_url": "https://grafana", "tls_secret_name": "grafana"}
-# )
-
-# pod_spec, _ = self.harness.get_pod_spec()
-
-# self.assertDictEqual(expected_result, pod_spec)
-
-# def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn:
-# """Test ingress resources with HTTPS and ingress whitelist."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "grafana",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "grafana",
-# "containerPort": 3000,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {},
-# "volumeConfig": [
-# {
-# "name": "dashboards",
-# "mountPath": "/etc/grafana/provisioning/dashboards/",
-# "files": [
-# {
-# "path": "dashboard-osm.yml",
-# "content": (
-# "apiVersion: 1\n"
-# "providers:\n"
-# " - name: 'osm'\n"
-# " orgId: 1\n"
-# " folder: ''\n"
-# " type: file\n"
-# " options:\n"
-# " path: /etc/grafana/provisioning/dashboards/\n"
-# ),
-# },
-# ],
-# },
-# {
-# "name": "datasources",
-# "mountPath": "/etc/grafana/provisioning/datasources/",
-# "files": [
-# {
-# "path": "datasource-prometheus.yml",
-# "content": (
-# "datasources:\n"
-# " - access: proxy\n"
-# " editable: true\n"
-# " is_default: true\n"
-# " name: osm_prometheus\n"
-# " orgId: 1\n"
-# " type: prometheus\n"
-# " version: 1\n"
-# " url: http://prometheus:9090\n"
-# ),
-# },
-# ],
-# },
-# ],
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 3000,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 3000,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {
-# "ingressResources": [
-# {
-# "name": "grafana-ingress",
-# "annotations": {
-# "nginx.ingress.kubernetes.io/proxy-body-size": "0",
-# "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0",
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": "grafana",
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": "grafana",
-# "servicePort": 3000,
-# },
-# }
-# ]
-# },
-# }
-# ],
-# "tls": [{"hosts": ["grafana"], "secretName": "grafana"}],
-# },
-# }
-# ],
-# },
-# }
-
-# self.harness.charm.on.start.emit()
-
-# # Initializing the prometheus relation
-# relation_id = self.harness.add_relation("prometheus", "prometheus")
-# self.harness.add_relation_unit(relation_id, "prometheus/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "prometheus",
-# {
-# "hostname": "prometheus",
-# "port": "9090",
-# },
-# )
-
-# self.harness.update_config(
-# {
-# "site_url": "https://grafana",
-# "tls_secret_name": "grafana",
-# "ingress_whitelist_source_range": "0.0.0.0/0",
-# }
-# )
-
-# pod_spec, _ = self.harness.get_pod_spec()
-
-# self.assertDictEqual(expected_result, pod_spec)
-
-# def test_on_prometheus_unit_relation_changed(self) -> NoReturn:
-# """Test to see if prometheus relation is updated."""
-# self.harness.charm.on.start.emit()
-
-# relation_id = self.harness.add_relation("prometheus", "prometheus")
-# self.harness.add_relation_unit(relation_id, "prometheus/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "prometheus",
-# {"hostname": "prometheus", "port": 9090},
-# )
-
-# # Verifying status
-# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/installers/charm/grafana/tests/test_pod_spec.py b/installers/charm/grafana/tests/test_pod_spec.py
deleted file mode 100644
index 88c85d3e..00000000
--- a/installers/charm/grafana/tests/test_pod_spec.py
+++ /dev/null
@@ -1,636 +0,0 @@
-# #!/usr/bin/env python3
-# # Copyright 2021 Canonical Ltd.
-# #
-# # Licensed under the Apache License, Version 2.0 (the "License"); you may
-# # not use this file except in compliance with the License. You may obtain
-# # a copy of the License at
-# #
-# # http://www.apache.org/licenses/LICENSE-2.0
-# #
-# # Unless required by applicable law or agreed to in writing, software
-# # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# # License for the specific language governing permissions and limitations
-# # under the License.
-# #
-# # For those usages not covered by the Apache License, Version 2.0 please
-# # contact: legal@canonical.com
-# #
-# # To get in touch with the maintainers, please contact:
-# # osm-charmers@lists.launchpad.net
-# ##
-
-# from typing import NoReturn
-# import unittest
-
-# import pod_spec
-
-
-# class TestPodSpec(unittest.TestCase):
-# """Pod spec unit tests."""
-
-# def test_make_pod_ports(self) -> NoReturn:
-# """Testing make pod ports."""
-# port = 3000
-
-# expected_result = [
-# {
-# "name": "grafana",
-# "containerPort": port,
-# "protocol": "TCP",
-# }
-# ]
-
-# pod_ports = pod_spec._make_pod_ports(port)
-
-# self.assertListEqual(expected_result, pod_ports)
-
-# def test_make_pod_envconfig(self) -> NoReturn:
-#         """Testing make pod envconfig."""
-# config = {}
-# relation_state = {
-# "prometheus_hostname": "prometheus",
-# "prometheus_port": "9090",
-# }
-
-# expected_result = {}
-
-# pod_envconfig = pod_spec._make_pod_envconfig(config, relation_state)
-
-# self.assertDictEqual(expected_result, pod_envconfig)
-
-# def test_make_pod_ingress_resources_without_site_url(self) -> NoReturn:
-# """Testing make pod ingress resources without site_url."""
-# config = {"site_url": ""}
-# app_name = "grafana"
-# port = 3000
-
-# pod_ingress_resources = pod_spec._make_pod_ingress_resources(
-# config, app_name, port
-# )
-
-# self.assertIsNone(pod_ingress_resources)
-
-# def test_make_pod_ingress_resources(self) -> NoReturn:
-# """Testing make pod ingress resources."""
-# config = {
-# "site_url": "http://grafana",
-# "max_file_size": 0,
-# "ingress_whitelist_source_range": "",
-# }
-# app_name = "grafana"
-# port = 3000
-
-# expected_result = [
-# {
-# "name": f"{app_name}-ingress",
-# "annotations": {
-# "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
-# "nginx.ingress.kubernetes.io/ssl-redirect": "false",
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": app_name,
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": app_name,
-# "servicePort": port,
-# },
-# }
-# ]
-# },
-# }
-# ]
-# },
-# }
-# ]
-
-# pod_ingress_resources = pod_spec._make_pod_ingress_resources(
-# config, app_name, port
-# )
-
-# self.assertListEqual(expected_result, pod_ingress_resources)
-
-# def test_make_pod_ingress_resources_with_whitelist_source_range(self) -> NoReturn:
-# """Testing make pod ingress resources with whitelist_source_range."""
-# config = {
-# "site_url": "http://grafana",
-# "max_file_size": 0,
-# "ingress_whitelist_source_range": "0.0.0.0/0",
-# }
-# app_name = "grafana"
-# port = 3000
-
-# expected_result = [
-# {
-# "name": f"{app_name}-ingress",
-# "annotations": {
-# "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
-# "nginx.ingress.kubernetes.io/ssl-redirect": "false",
-# "nginx.ingress.kubernetes.io/whitelist-source-range": config[
-# "ingress_whitelist_source_range"
-# ],
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": app_name,
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": app_name,
-# "servicePort": port,
-# },
-# }
-# ]
-# },
-# }
-# ]
-# },
-# }
-# ]
-
-# pod_ingress_resources = pod_spec._make_pod_ingress_resources(
-# config, app_name, port
-# )
-
-# self.assertListEqual(expected_result, pod_ingress_resources)
-
-# def test_make_pod_ingress_resources_with_https(self) -> NoReturn:
-#         """Testing make pod ingress resources with HTTPS."""
-# config = {
-# "site_url": "https://grafana",
-# "max_file_size": 0,
-# "ingress_whitelist_source_range": "",
-# "tls_secret_name": "",
-# }
-# app_name = "grafana"
-# port = 3000
-
-# expected_result = [
-# {
-# "name": f"{app_name}-ingress",
-# "annotations": {
-# "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": app_name,
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": app_name,
-# "servicePort": port,
-# },
-# }
-# ]
-# },
-# }
-# ],
-# "tls": [{"hosts": [app_name]}],
-# },
-# }
-# ]
-
-# pod_ingress_resources = pod_spec._make_pod_ingress_resources(
-# config, app_name, port
-# )
-
-# self.assertListEqual(expected_result, pod_ingress_resources)
-
-# def test_make_pod_ingress_resources_with_https_tls_secret_name(self) -> NoReturn:
-#         """Testing make pod ingress resources with HTTPS and TLS secret name."""
-# config = {
-# "site_url": "https://grafana",
-# "max_file_size": 0,
-# "ingress_whitelist_source_range": "",
-# "tls_secret_name": "secret_name",
-# }
-# app_name = "grafana"
-# port = 3000
-
-# expected_result = [
-# {
-# "name": f"{app_name}-ingress",
-# "annotations": {
-# "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": app_name,
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": app_name,
-# "servicePort": port,
-# },
-# }
-# ]
-# },
-# }
-# ],
-# "tls": [
-# {"hosts": [app_name], "secretName": config["tls_secret_name"]}
-# ],
-# },
-# }
-# ]
-
-# pod_ingress_resources = pod_spec._make_pod_ingress_resources(
-# config, app_name, port
-# )
-
-# self.assertListEqual(expected_result, pod_ingress_resources)
-
-# def test_make_pod_files(self) -> NoReturn:
-# """Testing make pod files."""
-# config = {"osm_dashboards": False}
-# relation_state = {
-# "prometheus_hostname": "prometheus",
-# "prometheus_port": "9090",
-# }
-
-# expected_result = [
-# {
-# "name": "dashboards",
-# "mountPath": "/etc/grafana/provisioning/dashboards/",
-# "files": [
-# {
-# "path": "dashboard-osm.yml",
-# "content": (
-# "apiVersion: 1\n"
-# "providers:\n"
-# " - name: 'osm'\n"
-# " orgId: 1\n"
-# " folder: ''\n"
-# " type: file\n"
-# " options:\n"
-# " path: /etc/grafana/provisioning/dashboards/\n"
-# ),
-# }
-# ],
-# },
-# {
-# "name": "datasources",
-# "mountPath": "/etc/grafana/provisioning/datasources/",
-# "files": [
-# {
-# "path": "datasource-prometheus.yml",
-# "content": (
-# "datasources:\n"
-# " - access: proxy\n"
-# " editable: true\n"
-# " is_default: true\n"
-# " name: osm_prometheus\n"
-# " orgId: 1\n"
-# " type: prometheus\n"
-# " version: 1\n"
-# " url: http://{}:{}\n".format(
-# relation_state.get("prometheus_hostname"),
-# relation_state.get("prometheus_port"),
-# )
-# ),
-# }
-# ],
-# },
-# ]
-
-# pod_envconfig = pod_spec._make_pod_files(config, relation_state)
-# self.assertListEqual(expected_result, pod_envconfig)
-
-# def test_make_readiness_probe(self) -> NoReturn:
-# """Testing make readiness probe."""
-# port = 3000
-
-# expected_result = {
-# "httpGet": {
-# "path": "/api/health",
-# "port": port,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# }
-
-# readiness_probe = pod_spec._make_readiness_probe(port)
-
-# self.assertDictEqual(expected_result, readiness_probe)
-
-# def test_make_liveness_probe(self) -> NoReturn:
-# """Testing make liveness probe."""
-# port = 3000
-
-# expected_result = {
-# "httpGet": {
-# "path": "/api/health",
-# "port": port,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# }
-
-# liveness_probe = pod_spec._make_liveness_probe(port)
-
-# self.assertDictEqual(expected_result, liveness_probe)
-
-# def test_make_pod_spec(self) -> NoReturn:
-# """Testing make pod spec."""
-# image_info = {"upstream-source": "ubuntu/grafana:latest"}
-# config = {
-# "site_url": "",
-# }
-# relation_state = {
-# "prometheus_hostname": "prometheus",
-# "prometheus_port": "9090",
-# }
-# app_name = "grafana"
-# port = 3000
-
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": app_name,
-# "imageDetails": image_info,
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": app_name,
-# "containerPort": port,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {},
-# "volumeConfig": [
-# {
-# "name": "dashboards",
-# "mountPath": "/etc/grafana/provisioning/dashboards/",
-# "files": [
-# {
-# "path": "dashboard-osm.yml",
-# "content": (
-# "apiVersion: 1\n"
-# "providers:\n"
-# " - name: 'osm'\n"
-# " orgId: 1\n"
-# " folder: ''\n"
-# " type: file\n"
-# " options:\n"
-# " path: /etc/grafana/provisioning/dashboards/\n"
-# ),
-# }
-# ],
-# },
-# {
-# "name": "datasources",
-# "mountPath": "/etc/grafana/provisioning/datasources/",
-# "files": [
-# {
-# "path": "datasource-prometheus.yml",
-# "content": (
-# "datasources:\n"
-# " - access: proxy\n"
-# " editable: true\n"
-# " is_default: true\n"
-# " name: osm_prometheus\n"
-# " orgId: 1\n"
-# " type: prometheus\n"
-# " version: 1\n"
-# " url: http://{}:{}\n".format(
-# relation_state.get("prometheus_hostname"),
-# relation_state.get("prometheus_port"),
-# )
-# ),
-# }
-# ],
-# },
-# ],
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": port,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": port,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# }
-# ],
-# "kubernetesResources": {"ingressResources": []},
-# }
-
-# spec = pod_spec.make_pod_spec(
-# image_info, config, relation_state, app_name, port
-# )
-
-# self.assertDictEqual(expected_result, spec)
-
-# def test_make_pod_spec_with_ingress(self) -> NoReturn:
-#         """Testing make pod spec with ingress."""
-# image_info = {"upstream-source": "ubuntu/grafana:latest"}
-# config = {
-# "site_url": "https://grafana",
-# "tls_secret_name": "grafana",
-# "max_file_size": 0,
-# "ingress_whitelist_source_range": "0.0.0.0/0",
-# }
-# relation_state = {
-# "prometheus_hostname": "prometheus",
-# "prometheus_port": "9090",
-# }
-# app_name = "grafana"
-# port = 3000
-
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": app_name,
-# "imageDetails": image_info,
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": app_name,
-# "containerPort": port,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {},
-# "volumeConfig": [
-# {
-# "name": "dashboards",
-# "mountPath": "/etc/grafana/provisioning/dashboards/",
-# "files": [
-# {
-# "path": "dashboard-osm.yml",
-# "content": (
-# "apiVersion: 1\n"
-# "providers:\n"
-# " - name: 'osm'\n"
-# " orgId: 1\n"
-# " folder: ''\n"
-# " type: file\n"
-# " options:\n"
-# " path: /etc/grafana/provisioning/dashboards/\n"
-# ),
-# }
-# ],
-# },
-# {
-# "name": "datasources",
-# "mountPath": "/etc/grafana/provisioning/datasources/",
-# "files": [
-# {
-# "path": "datasource-prometheus.yml",
-# "content": (
-# "datasources:\n"
-# " - access: proxy\n"
-# " editable: true\n"
-# " is_default: true\n"
-# " name: osm_prometheus\n"
-# " orgId: 1\n"
-# " type: prometheus\n"
-# " version: 1\n"
-# " url: http://{}:{}\n".format(
-# relation_state.get("prometheus_hostname"),
-# relation_state.get("prometheus_port"),
-# )
-# ),
-# }
-# ],
-# },
-# ],
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": port,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": port,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# }
-# ],
-# "kubernetesResources": {
-# "ingressResources": [
-# {
-# "name": "{}-ingress".format(app_name),
-# "annotations": {
-# "nginx.ingress.kubernetes.io/proxy-body-size": str(
-# config.get("max_file_size")
-# ),
-# "nginx.ingress.kubernetes.io/whitelist-source-range": config.get(
-# "ingress_whitelist_source_range"
-# ),
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": app_name,
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": app_name,
-# "servicePort": port,
-# },
-# }
-# ]
-# },
-# }
-# ],
-# "tls": [
-# {
-# "hosts": [app_name],
-# "secretName": config.get("tls_secret_name"),
-# }
-# ],
-# },
-# }
-# ],
-# },
-# }
-
-# spec = pod_spec.make_pod_spec(
-# image_info, config, relation_state, app_name, port
-# )
-
-# self.assertDictEqual(expected_result, spec)
-
-# def test_make_pod_spec_without_image_info(self) -> NoReturn:
-# """Testing make pod spec without image_info."""
-# image_info = None
-# config = {
-# "site_url": "",
-# }
-# relation_state = {
-# "prometheus_hostname": "prometheus",
-# "prometheus_port": "9090",
-# }
-# app_name = "grafana"
-# port = 3000
-
-# spec = pod_spec.make_pod_spec(
-# image_info, config, relation_state, app_name, port
-# )
-
-# self.assertIsNone(spec)
-
-# def test_make_pod_spec_without_relation_state(self) -> NoReturn:
-# """Testing make pod spec without relation_state."""
-# image_info = {"upstream-source": "ubuntu/grafana:latest"}
-# config = {
-# "site_url": "",
-# }
-# relation_state = {}
-# app_name = "grafana"
-# port = 3000
-
-# with self.assertRaises(ValueError):
-# pod_spec.make_pod_spec(image_info, config, relation_state, app_name, port)
-
-
-# if __name__ == "__main__":
-# unittest.main()
diff --git a/installers/charm/grafana/tox.ini b/installers/charm/grafana/tox.ini
deleted file mode 100644
index 58e13a66..00000000
--- a/installers/charm/grafana/tox.ini
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-#######################################################################################
-
-[tox]
-envlist = black, cover, flake8, pylint, yamllint, safety
-skipsdist = true
-
-[tox:jenkins]
-toxworkdir = /tmp/.tox
-
-[testenv]
-basepython = python3.8
-setenv = VIRTUAL_ENV={envdir}
- PYTHONDONTWRITEBYTECODE = 1
-deps = -r{toxinidir}/requirements.txt
-
-
-#######################################################################################
-[testenv:black]
-deps = black
-commands =
- black --check --diff src/ tests/
-
-
-#######################################################################################
-[testenv:cover]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- coverage
- nose2
-commands =
- sh -c 'rm -f nosetests.xml'
- coverage erase
- nose2 -C --coverage src
- coverage report --omit='*tests*'
- coverage html -d ./cover --omit='*tests*'
- coverage xml -o coverage.xml --omit=*tests*
-whitelist_externals = sh
-
-
-#######################################################################################
-[testenv:flake8]
-deps = flake8
- flake8-import-order
-commands =
- flake8 src/ tests/ --exclude=*pod_spec*
-
-
-#######################################################################################
-[testenv:pylint]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- pylint==2.10.2
-commands =
- pylint -E src/ tests/
-
-
-#######################################################################################
-[testenv:safety]
-setenv =
- LC_ALL=C.UTF-8
- LANG=C.UTF-8
-deps = {[testenv]deps}
- safety
-commands =
- - safety check --full-report
-
-
-#######################################################################################
-[testenv:yamllint]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- yamllint
-commands = yamllint .
-
-#######################################################################################
-[testenv:build]
-passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
-whitelist_externals =
- charmcraft
- sh
-commands =
- charmcraft pack
- sh -c 'ubuntu_version=20.04; \
- architectures="amd64-aarch64-arm64"; \
- charm_name=`cat metadata.yaml | grep -E "^name: " | cut -f 2 -d " "`; \
- mv $charm_name"_ubuntu-"$ubuntu_version-$architectures.charm $charm_name.charm'
-
-#######################################################################################
-[flake8]
-ignore =
- W291,
- W293,
- W503,
- E123,
- E125,
- E226,
- E241,
-exclude =
- .git,
- __pycache__,
- .tox,
-max-line-length = 120
-show-source = True
-builtins = _
-max-complexity = 10
-import-order-style = google
diff --git a/installers/charm/juju-simplestreams-operator/.gitignore b/installers/charm/juju-simplestreams-operator/.gitignore
deleted file mode 100644
index 87d0a587..00000000
--- a/installers/charm/juju-simplestreams-operator/.gitignore
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-venv/
-build/
-*.charm
-.tox/
-.coverage
-coverage.xml
-__pycache__/
-*.py[cod]
-.vscode
\ No newline at end of file
diff --git a/installers/charm/juju-simplestreams-operator/.jujuignore b/installers/charm/juju-simplestreams-operator/.jujuignore
deleted file mode 100644
index 17c7a8bb..00000000
--- a/installers/charm/juju-simplestreams-operator/.jujuignore
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-/venv
-*.py[cod]
-*.charm
diff --git a/installers/charm/juju-simplestreams-operator/CONTRIBUTING.md b/installers/charm/juju-simplestreams-operator/CONTRIBUTING.md
deleted file mode 100644
index 74a6d6d0..00000000
--- a/installers/charm/juju-simplestreams-operator/CONTRIBUTING.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-# Contributing
-
-## Overview
-
-This document explains the processes and practices recommended for contributing enhancements to
-this operator.
-
-- Generally, before developing enhancements to this charm, you should consider [opening an issue
- ](https://osm.etsi.org/bugzilla/enter_bug.cgi?product=OSM) explaining your use case. (Component=devops, version=master)
-- If you would like to chat with us about your use-cases or proposed implementation, you can reach
- us at [OSM Juju public channel](https://opensourcemano.slack.com/archives/C027KJGPECA).
-- Familiarising yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) library
- will help you a lot when working on new features or bug fixes.
-- All enhancements require review before being merged. Code review typically examines
- - code quality
- - test coverage
-  - user experience for Juju administrators of this charm.
-- Please help us out in ensuring easy-to-review branches by rebasing your Gerrit patch onto
- the `master` branch.
-
-## Developing
-
-You can use the environments created by `tox` for development:
-
-```shell
-tox --notest -e unit
-source .tox/unit/bin/activate
-```
-
-### Testing
-
-```shell
-tox -e fmt # update your code according to linting rules
-tox -e lint # code style
-tox -e unit # unit tests
-tox -e integration # integration tests
-tox # runs 'lint' and 'unit' environments
-```
-
-## Build charm
-
-Build the charm in this git repository using:
-
-```shell
-charmcraft pack
-```
-
-### Deploy
-
-```bash
-# Create a model
-juju add-model dev
-# Enable DEBUG logging
-juju model-config logging-config="=INFO;unit=DEBUG"
-# Deploy the charm
-juju deploy ./osm-juju-simplestreams_ubuntu-22.04-amd64.charm \
- --resource server-image=nginx:1.23.0
-```
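According to the charm's `config.yaml`, the ingress options only take effect when the charm is related to the nginx-ingress-integrator. A minimal sketch of that follow-up step, assuming the application is named `osm-juju-simplestreams` (taken from the README; the deployed name may differ):

```bash
# Sketch only: relate the charm to the ingress integrator (Juju 2.9+ syntax).
juju deploy nginx-ingress-integrator
juju relate osm-juju-simplestreams nginx-ingress-integrator
```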
diff --git a/installers/charm/juju-simplestreams-operator/LICENSE b/installers/charm/juju-simplestreams-operator/LICENSE
deleted file mode 100644
index 7e9d5046..00000000
--- a/installers/charm/juju-simplestreams-operator/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2022 Canonical Ltd.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/installers/charm/juju-simplestreams-operator/README.md b/installers/charm/juju-simplestreams-operator/README.md
deleted file mode 100644
index bc94ddee..00000000
--- a/installers/charm/juju-simplestreams-operator/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-
-
-# Juju simplestreams
-
-Charmhub package name: osm-juju-simplestreams
-More information: https://charmhub.io/osm-juju-simplestreams
-
-## Other resources
-
-* [Read more](https://osm.etsi.org/docs/user-guide/latest/)
-
-* [Contributing](https://osm.etsi.org/gitweb/?p=osm/devops.git;a=blob;f=installers/charm/osm-juju-simplestreams/CONTRIBUTING.md)
-
-* See the [Juju SDK documentation](https://juju.is/docs/sdk) for more information about developing and improving charms.
diff --git a/installers/charm/juju-simplestreams-operator/actions.yaml b/installers/charm/juju-simplestreams-operator/actions.yaml
deleted file mode 100644
index c8d0e323..00000000
--- a/installers/charm/juju-simplestreams-operator/actions.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Actions tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-add-image-metadata:
- description: Action to add image metadata
- params:
- series:
-      description: Ubuntu series of the image
- type: string
- image-id:
- description: Openstack image id for the specified series
- type: string
- region:
- description: Openstack region
- type: string
- auth-url:
- description: Openstack authentication url
- type: string
- required:
- - series
- - image-id
- - region
- - auth-url
-backup:
- description: Action to get a backup of the important data.
-restore:
- description: Action to restore from a backup.
diff --git a/installers/charm/juju-simplestreams-operator/charmcraft.yaml b/installers/charm/juju-simplestreams-operator/charmcraft.yaml
deleted file mode 100644
index f8944c55..00000000
--- a/installers/charm/juju-simplestreams-operator/charmcraft.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-
-type: charm
-bases:
- - build-on:
- - name: "ubuntu"
- channel: "20.04"
- run-on:
- - name: "ubuntu"
- channel: "20.04"
-
-parts:
- charm:
- prime:
- - files/*
diff --git a/installers/charm/juju-simplestreams-operator/config.yaml b/installers/charm/juju-simplestreams-operator/config.yaml
deleted file mode 100644
index b76533fd..00000000
--- a/installers/charm/juju-simplestreams-operator/config.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Configure tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-options:
- # Ingress options
- external-hostname:
- default: ""
- description: |
- The url that will be configured in the Kubernetes ingress.
-
-    The easiest way of configuring the external-hostname without having a DNS setup is to use
-    a wildcard DNS service like nip.io, constructing the URL like so:
- - nbi.127.0.0.1.nip.io (valid within the K8s cluster node)
- - nbi..nip.io (valid from outside the K8s cluster node)
-
- This option is only applicable when the Kubernetes cluster has nginx ingress configured
- and the charm is related to the nginx-ingress-integrator.
- See more: https://charmhub.io/nginx-ingress-integrator
- type: string
- max-body-size:
- default: 20
- description:
- Max allowed body-size (for file uploads) in megabytes, set to 0 to
- disable limits.
- type: int
- tls-secret-name:
- description: TLS secret name to use for ingress.
- type: string
diff --git a/installers/charm/juju-simplestreams-operator/files/juju-metadata b/installers/charm/juju-simplestreams-operator/files/juju-metadata
deleted file mode 100755
index b6007fe6..00000000
Binary files a/installers/charm/juju-simplestreams-operator/files/juju-metadata and /dev/null differ
diff --git a/installers/charm/juju-simplestreams-operator/files/nginx.conf b/installers/charm/juju-simplestreams-operator/files/nginx.conf
deleted file mode 100644
index d47540ea..00000000
--- a/installers/charm/juju-simplestreams-operator/files/nginx.conf
+++ /dev/null
@@ -1,35 +0,0 @@
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
-
-events {}
-http {
- include mime.types;
- sendfile on;
-
- server {
- listen 8080;
- listen [::]:8080;
-
- autoindex off;
-
- server_name _;
- server_tokens off;
-
- root /app/static;
- gzip_static on;
- }
-}
\ No newline at end of file
diff --git a/installers/charm/juju-simplestreams-operator/lib/charms/nginx_ingress_integrator/v0/ingress.py b/installers/charm/juju-simplestreams-operator/lib/charms/nginx_ingress_integrator/v0/ingress.py
deleted file mode 100644
index be2d762b..00000000
--- a/installers/charm/juju-simplestreams-operator/lib/charms/nginx_ingress_integrator/v0/ingress.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-"""Library for the ingress relation.
-
-This library contains the Requires and Provides classes for handling
-the ingress interface.
-
-Import `IngressRequires` in your charm, with two required options:
- - "self" (the charm itself)
- - config_dict
-
-`config_dict` accepts the following keys:
- - service-hostname (required)
- - service-name (required)
- - service-port (required)
- - additional-hostnames
- - limit-rps
- - limit-whitelist
- - max-body-size
- - owasp-modsecurity-crs
- - path-routes
- - retry-errors
- - rewrite-enabled
- - rewrite-target
- - service-namespace
- - session-cookie-max-age
- - tls-secret-name
-
-See [the config section](https://charmhub.io/nginx-ingress-integrator/configure) for descriptions
-of each, along with the required type.
-
-As an example, add the following to `src/charm.py`:
-```
-from charms.nginx_ingress_integrator.v0.ingress import IngressRequires
-
-# In your charm's `__init__` method.
-self.ingress = IngressRequires(self, {"service-hostname": self.config["external_hostname"],
- "service-name": self.app.name,
- "service-port": 80})
-
-# In your charm's `config-changed` handler.
-self.ingress.update_config({"service-hostname": self.config["external_hostname"]})
-```
-And then add the following to `metadata.yaml`:
-```
-requires:
- ingress:
- interface: ingress
-```
-You _must_ register the IngressRequires class as part of the `__init__` method
-rather than, for instance, a config-changed event handler. If the instance is
-created later, it will not have been registered as an observer when the current
-relation-changed event fired, and so it will miss that event.
-"""
-
-import logging
-
-from ops.charm import CharmEvents
-from ops.framework import EventBase, EventSource, Object
-from ops.model import BlockedStatus
-
-# The unique Charmhub library identifier, never change it
-LIBID = "db0af4367506491c91663468fb5caa4c"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 10
-
-logger = logging.getLogger(__name__)
-
-REQUIRED_INGRESS_RELATION_FIELDS = {
- "service-hostname",
- "service-name",
- "service-port",
-}
-
-OPTIONAL_INGRESS_RELATION_FIELDS = {
- "additional-hostnames",
- "limit-rps",
- "limit-whitelist",
- "max-body-size",
- "owasp-modsecurity-crs",
- "path-routes",
- "retry-errors",
- "rewrite-target",
- "rewrite-enabled",
- "service-namespace",
- "session-cookie-max-age",
- "tls-secret-name",
-}
-
-
-class IngressAvailableEvent(EventBase):
- pass
-
-
-class IngressBrokenEvent(EventBase):
- pass
-
-
-class IngressCharmEvents(CharmEvents):
- """Custom charm events."""
-
- ingress_available = EventSource(IngressAvailableEvent)
- ingress_broken = EventSource(IngressBrokenEvent)
-
-
-class IngressRequires(Object):
- """This class defines the functionality for the 'requires' side of the 'ingress' relation.
-
- Hook events observed:
- - relation-changed
- """
-
- def __init__(self, charm, config_dict):
- super().__init__(charm, "ingress")
-
- self.framework.observe(charm.on["ingress"].relation_changed, self._on_relation_changed)
-
- self.config_dict = config_dict
-
- def _config_dict_errors(self, update_only=False):
- """Check our config dict for errors."""
- blocked_message = "Error in ingress relation, check `juju debug-log`"
- unknown = [
- x
- for x in self.config_dict
- if x not in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS
- ]
- if unknown:
- logger.error(
- "Ingress relation error, unknown key(s) in config dictionary found: %s",
- ", ".join(unknown),
- )
- self.model.unit.status = BlockedStatus(blocked_message)
- return True
- if not update_only:
- missing = [x for x in REQUIRED_INGRESS_RELATION_FIELDS if x not in self.config_dict]
- if missing:
- logger.error(
- "Ingress relation error, missing required key(s) in config dictionary: %s",
- ", ".join(sorted(missing)),
- )
- self.model.unit.status = BlockedStatus(blocked_message)
- return True
- return False
-
- def _on_relation_changed(self, event):
- """Handle the relation-changed event."""
- # `self.unit` isn't available here, so use `self.model.unit`.
- if self.model.unit.is_leader():
- if self._config_dict_errors():
- return
- for key in self.config_dict:
- event.relation.data[self.model.app][key] = str(self.config_dict[key])
-
- def update_config(self, config_dict):
- """Allow for updates to relation."""
- if self.model.unit.is_leader():
- self.config_dict = config_dict
- if self._config_dict_errors(update_only=True):
- return
- relation = self.model.get_relation("ingress")
- if relation:
- for key in self.config_dict:
- relation.data[self.model.app][key] = str(self.config_dict[key])
-
-
-class IngressProvides(Object):
- """This class defines the functionality for the 'provides' side of the 'ingress' relation.
-
- Hook events observed:
- - relation-changed
- """
-
- def __init__(self, charm):
- super().__init__(charm, "ingress")
- # Observe the relation-changed hook event and bind
- # self.on_relation_changed() to handle the event.
- self.framework.observe(charm.on["ingress"].relation_changed, self._on_relation_changed)
- self.framework.observe(charm.on["ingress"].relation_broken, self._on_relation_broken)
- self.charm = charm
-
- def _on_relation_changed(self, event):
- """Handle a change to the ingress relation.
-
- Confirm we have the fields we expect to receive."""
- # `self.unit` isn't available here, so use `self.model.unit`.
- if not self.model.unit.is_leader():
- return
-
- ingress_data = {
- field: event.relation.data[event.app].get(field)
- for field in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS
- }
-
- missing_fields = sorted(
- [
- field
- for field in REQUIRED_INGRESS_RELATION_FIELDS
- if ingress_data.get(field) is None
- ]
- )
-
- if missing_fields:
- logger.error(
- "Missing required data fields for ingress relation: {}".format(
- ", ".join(missing_fields)
- )
- )
- self.model.unit.status = BlockedStatus(
- "Missing fields for ingress: {}".format(", ".join(missing_fields))
- )
-
- # Create an event that our charm can use to decide it's okay to
- # configure the ingress.
- self.charm.on.ingress_available.emit()
-
- def _on_relation_broken(self, _):
- """Handle a relation-broken event in the ingress relation."""
- if not self.model.unit.is_leader():
- return
-
- # Create an event that our charm can use to remove the ingress resource.
- self.charm.on.ingress_broken.emit()
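
For reference, the `IngressProvides` side above emits the custom `ingress_available` and
`ingress_broken` events on the charm itself, so a provider charm is expected to expose
`IngressCharmEvents` and observe them. A minimal sketch under that assumption (the class and
handler names are illustrative and not part of the deleted code; it also assumes a
`provides: ingress` entry in the provider's metadata.yaml):

```python
from charms.nginx_ingress_integrator.v0.ingress import IngressCharmEvents, IngressProvides
from ops.charm import CharmBase
from ops.main import main


class IngressProviderCharm(CharmBase):
    """Hypothetical 'provides' side charm for the ingress interface."""

    # Expose the library's custom events so IngressProvides can emit them
    # via self.charm.on.ingress_available / self.charm.on.ingress_broken.
    on = IngressCharmEvents()

    def __init__(self, *args):
        super().__init__(*args)
        self.ingress = IngressProvides(self)
        self.framework.observe(self.on.ingress_available, self._on_ingress_available)
        self.framework.observe(self.on.ingress_broken, self._on_ingress_broken)

    def _on_ingress_available(self, _event):
        # All required relation fields were provided; create or update the
        # Kubernetes Ingress resource here.
        pass

    def _on_ingress_broken(self, _event):
        # The relation is gone; remove the Ingress resource here.
        pass


if __name__ == "__main__":
    main(IngressProviderCharm)
```
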
diff --git a/installers/charm/juju-simplestreams-operator/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/installers/charm/juju-simplestreams-operator/lib/charms/observability_libs/v1/kubernetes_service_patch.py
deleted file mode 100644
index 506dbf03..00000000
--- a/installers/charm/juju-simplestreams-operator/lib/charms/observability_libs/v1/kubernetes_service_patch.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-
-"""# KubernetesServicePatch Library.
-
-This library is designed to enable developers to more simply patch the Kubernetes Service created
-by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a
-service named after the application in the namespace (named after the Juju model). This service by
-default contains a "placeholder" port, which is 65535/TCP.
-
-When modifying the default set of resources managed by Juju, one must consider the lifecycle of the
-charm. In this case, any modifications to the default service (created during deployment), will be
-overwritten during a charm upgrade.
-
-When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm`
-events which applies the patch to the cluster. This should ensure that the service ports are
-correct throughout the charm's life.
-
-The constructor simply takes a reference to the parent charm, and a list of
-[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the
-service. For information regarding the `lightkube` `ServicePort` model, please visit the
-`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport).
-
-Optionally, a name of the service (in case service name needs to be patched as well), labels,
-selectors, and annotations can be provided as keyword arguments.
-
-## Getting Started
-
-To get started using the library, you just need to fetch the library using `charmcraft`. **Note
-that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.**
-
-```shell
-cd some-charm
-charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch
-echo <<-EOF >> requirements.txt
-lightkube
-lightkube-models
-EOF
-```
-
-Then, to initialise the library:
-
-For `ClusterIP` services:
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- port = ServicePort(443, name=f"{self.app.name}")
- self.service_patcher = KubernetesServicePatch(self, [port])
- # ...
-```
-
-For `LoadBalancer`/`NodePort` services:
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666)
- self.service_patcher = KubernetesServicePatch(
-            self, [port], service_type="LoadBalancer"
- )
- # ...
-```
-
-Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"`
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP")
- udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP")
- sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP")
- self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp])
- # ...
-```
-
-Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library
-does not try to make any API calls, or open any files during testing that are unlikely to be
-present, and could break your tests. The easiest way to do this is during your test `setUp`:
-
-```python
-# ...
-
-@patch("charm.KubernetesServicePatch", lambda x, y: None)
-def setUp(self, *unused):
- self.harness = Harness(SomeCharm)
- # ...
-```
-"""
-
-import logging
-from types import MethodType
-from typing import List, Literal
-
-from lightkube import ApiError, Client
-from lightkube.models.core_v1 import ServicePort, ServiceSpec
-from lightkube.models.meta_v1 import ObjectMeta
-from lightkube.resources.core_v1 import Service
-from lightkube.types import PatchType
-from ops.charm import CharmBase
-from ops.framework import Object
-
-logger = logging.getLogger(__name__)
-
-# The unique Charmhub library identifier, never change it
-LIBID = "0042f86d0a874435adef581806cddbbb"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 1
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 1
-
-ServiceType = Literal["ClusterIP", "LoadBalancer"]
-
-
-class KubernetesServicePatch(Object):
- """A utility for patching the Kubernetes service set up by Juju."""
-
- def __init__(
- self,
- charm: CharmBase,
- ports: List[ServicePort],
- service_name: str = None,
- service_type: ServiceType = "ClusterIP",
- additional_labels: dict = None,
- additional_selectors: dict = None,
- additional_annotations: dict = None,
- ):
- """Constructor for KubernetesServicePatch.
-
- Args:
- charm: the charm that is instantiating the library.
- ports: a list of ServicePorts
- service_name: allows setting custom name to the patched service. If none given,
- application name will be used.
- service_type: desired type of K8s service. Default value is in line with ServiceSpec's
- default value.
- additional_labels: Labels to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_selectors: Selectors to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_annotations: Annotations to be added to the kubernetes service.
- """
- super().__init__(charm, "kubernetes-service-patch")
- self.charm = charm
- self.service_name = service_name if service_name else self._app
- self.service = self._service_object(
- ports,
- service_name,
- service_type,
- additional_labels,
- additional_selectors,
- additional_annotations,
- )
-
- # Make mypy type checking happy that self._patch is a method
- assert isinstance(self._patch, MethodType)
- # Ensure this patch is applied during the 'install' and 'upgrade-charm' events
- self.framework.observe(charm.on.install, self._patch)
- self.framework.observe(charm.on.upgrade_charm, self._patch)
-
- def _service_object(
- self,
- ports: List[ServicePort],
- service_name: str = None,
- service_type: ServiceType = "ClusterIP",
- additional_labels: dict = None,
- additional_selectors: dict = None,
- additional_annotations: dict = None,
- ) -> Service:
- """Creates a valid Service representation.
-
- Args:
- ports: a list of ServicePorts
- service_name: allows setting custom name to the patched service. If none given,
- application name will be used.
- service_type: desired type of K8s service. Default value is in line with ServiceSpec's
- default value.
- additional_labels: Labels to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_selectors: Selectors to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_annotations: Annotations to be added to the kubernetes service.
-
- Returns:
- Service: A valid representation of a Kubernetes Service with the correct ports.
- """
- if not service_name:
- service_name = self._app
- labels = {"app.kubernetes.io/name": self._app}
- if additional_labels:
- labels.update(additional_labels)
- selector = {"app.kubernetes.io/name": self._app}
- if additional_selectors:
- selector.update(additional_selectors)
- return Service(
- apiVersion="v1",
- kind="Service",
- metadata=ObjectMeta(
- namespace=self._namespace,
- name=service_name,
- labels=labels,
- annotations=additional_annotations, # type: ignore[arg-type]
- ),
- spec=ServiceSpec(
- selector=selector,
- ports=ports,
- type=service_type,
- ),
- )
-
- def _patch(self, _) -> None:
- """Patch the Kubernetes service created by Juju to map the correct port.
-
- Raises:
- PatchFailed: if patching fails due to lack of permissions, or otherwise.
- """
- if not self.charm.unit.is_leader():
- return
-
- client = Client()
- try:
- if self.service_name != self._app:
- self._delete_and_create_service(client)
- client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE)
- except ApiError as e:
- if e.status.code == 403:
- logger.error("Kubernetes service patch failed: `juju trust` this application.")
- else:
- logger.error("Kubernetes service patch failed: %s", str(e))
- else:
- logger.info("Kubernetes service '%s' patched successfully", self._app)
-
- def _delete_and_create_service(self, client: Client):
- service = client.get(Service, self._app, namespace=self._namespace)
- service.metadata.name = self.service_name # type: ignore[attr-defined]
- service.metadata.resourceVersion = service.metadata.uid = None # type: ignore[attr-defined] # noqa: E501
- client.delete(Service, self._app, namespace=self._namespace)
- client.create(service)
-
- def is_patched(self) -> bool:
- """Reports if the service patch has been applied.
-
- Returns:
- bool: A boolean indicating if the service patch has been applied.
- """
- client = Client()
- # Get the relevant service from the cluster
- service = client.get(Service, name=self.service_name, namespace=self._namespace)
- # Construct a list of expected ports, should the patch be applied
- expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]
- # Construct a list in the same manner, using the fetched service
- fetched_ports = [(p.port, p.targetPort) for p in service.spec.ports] # type: ignore[attr-defined] # noqa: E501
- return expected_ports == fetched_ports
-
- @property
- def _app(self) -> str:
- """Name of the current Juju application.
-
- Returns:
- str: A string containing the name of the current Juju application.
- """
- return self.charm.app.name
-
- @property
- def _namespace(self) -> str:
- """The Kubernetes namespace we're running in.
-
- Returns:
- str: A string containing the name of the current Kubernetes namespace.
- """
- with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
- return f.read().strip()
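
Beyond the docstring examples above, the constructor also accepts `service_name`,
`additional_labels`, `additional_selectors`, and `additional_annotations`. A hedged sketch of a
call that uses them, following the same in-`__init__` style as the docstring (the label and
annotation values are purely illustrative):

```python
from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
from lightkube.models.core_v1 import ServicePort
from ops.charm import CharmBase


class SomeCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        port = ServicePort(8080, name=f"{self.app.name}", targetPort=8080)
        self.service_patcher = KubernetesServicePatch(
            self,
            [port],
            # Renaming the service makes _patch delete the Juju-created service
            # and recreate it under this name.
            service_name=f"{self.app.name}-external",
            service_type="LoadBalancer",
            additional_labels={"app.kubernetes.io/part-of": "osm"},
            additional_annotations={"example.com/owner": "osm-charmers"},
        )
```
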
diff --git a/installers/charm/juju-simplestreams-operator/lib/charms/osm_libs/v0/utils.py b/installers/charm/juju-simplestreams-operator/lib/charms/osm_libs/v0/utils.py
deleted file mode 100644
index df3da94e..00000000
--- a/installers/charm/juju-simplestreams-operator/lib/charms/osm_libs/v0/utils.py
+++ /dev/null
@@ -1,544 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-"""OSM Utils Library.
-
-This library offers some utilities made for but not limited to Charmed OSM.
-
-# Getting started
-
-Execute the following command inside your Charmed Operator folder to fetch the library.
-
-```shell
-charmcraft fetch-lib charms.osm_libs.v0.utils
-```
-
-# CharmError Exception
-
-An exception that takes two arguments, the message and the StatusBase class, which are useful
-for setting the status of the charm when the exception is raised.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import CharmError
-
-class MyCharm(CharmBase):
- def _on_config_changed(self, _):
- try:
- if not self.config.get("some-option"):
- raise CharmError("need some-option", BlockedStatus)
-
- if not self.mysql_ready:
- raise CharmError("waiting for mysql", WaitingStatus)
-
- # Do stuff...
-
-        except CharmError as e:
- self.unit.status = e.status
-```
-
-# Pebble validations
-
-The `check_container_ready` function checks that a container is ready,
-and therefore Pebble is ready.
-
-The `check_service_active` function checks that a service in a container is running.
-
-Both functions raise a CharmError if the validations fail.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import check_container_ready, check_service_active
-
-class MyCharm(CharmBase):
- def _on_config_changed(self, _):
- try:
- container: Container = self.unit.get_container("my-container")
- check_container_ready(container)
- check_service_active(container, "my-service")
- # Do stuff...
-
-        except CharmError as e:
- self.unit.status = e.status
-```
-
-# Debug-mode
-
-The debug-mode allows OSM developers to easily debug OSM modules.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import DebugMode
-
-class MyCharm(CharmBase):
- _stored = StoredState()
-
- def __init__(self, _):
- # ...
- container: Container = self.unit.get_container("my-container")
- hostpaths = [
- HostPath(
- config="module-hostpath",
- container_path="/usr/lib/python3/dist-packages/module"
- ),
- ]
- vscode_workspace_path = "files/vscode-workspace.json"
- self.debug_mode = DebugMode(
- self,
- self._stored,
- container,
- hostpaths,
- vscode_workspace_path,
- )
-
- def _on_update_status(self, _):
- if self.debug_mode.started:
- return
- # ...
-
- def _get_debug_mode_information(self):
- command = self.debug_mode.command
- password = self.debug_mode.password
- return command, password
-```
-
-# More
-
-- Get pod IP with `get_pod_ip()`
-"""
-from dataclasses import dataclass
-import logging
-import secrets
-import socket
-from pathlib import Path
-from typing import List
-
-from lightkube import Client
-from lightkube.models.core_v1 import HostPathVolumeSource, Volume, VolumeMount
-from lightkube.resources.apps_v1 import StatefulSet
-from ops.charm import CharmBase
-from ops.framework import Object, StoredState
-from ops.model import (
- ActiveStatus,
- BlockedStatus,
- Container,
- MaintenanceStatus,
- StatusBase,
- WaitingStatus,
-)
-from ops.pebble import ServiceStatus
-
-# The unique Charmhub library identifier, never change it
-LIBID = "e915908eebee4cdd972d484728adf984"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 3
-
-logger = logging.getLogger(__name__)
-
-
-class CharmError(Exception):
- """Charm Error Exception."""
-
- def __init__(self, message: str, status_class: StatusBase = BlockedStatus) -> None:
- self.message = message
- self.status_class = status_class
- self.status = status_class(message)
-
-
-def check_container_ready(container: Container) -> None:
- """Check Pebble has started in the container.
-
- Args:
- container (Container): Container to be checked.
-
- Raises:
- CharmError: if container is not ready.
- """
- if not container.can_connect():
- raise CharmError("waiting for pebble to start", MaintenanceStatus)
-
-
-def check_service_active(container: Container, service_name: str) -> None:
- """Check if the service is running.
-
- Args:
- container (Container): Container to be checked.
- service_name (str): Name of the service to check.
-
- Raises:
- CharmError: if the service is not running.
- """
- if service_name not in container.get_plan().services:
- raise CharmError(f"{service_name} service not configured yet", WaitingStatus)
-
- if container.get_service(service_name).current != ServiceStatus.ACTIVE:
- raise CharmError(f"{service_name} service is not running")
-
-
-def get_pod_ip() -> str:
- """Get Kubernetes Pod IP.
-
- Returns:
- str: The IP of the Pod.
- """
- return socket.gethostbyname(socket.gethostname())
-
-
-_DEBUG_SCRIPT = r"""#!/bin/bash
-# Install SSH
-
-function download_code(){{
- wget https://go.microsoft.com/fwlink/?LinkID=760868 -O code.deb
-}}
-
-function setup_envs(){{
- grep "source /debug.envs" /root/.bashrc || echo "source /debug.envs" | tee -a /root/.bashrc
-}}
-function setup_ssh(){{
- apt install ssh -y
- cat /etc/ssh/sshd_config |
- grep -E '^PermitRootLogin yes$$' || (
- echo PermitRootLogin yes |
- tee -a /etc/ssh/sshd_config
- )
- service ssh stop
- sleep 3
- service ssh start
- usermod --password $(echo {} | openssl passwd -1 -stdin) root
-}}
-
-function setup_code(){{
- apt install libasound2 -y
- (dpkg -i code.deb || apt-get install -f -y || apt-get install -f -y) && echo Code installed successfully
- code --install-extension ms-python.python --user-data-dir /root
- mkdir -p /root/.vscode-server
- cp -R /root/.vscode/extensions /root/.vscode-server/extensions
-}}
-
-export DEBIAN_FRONTEND=noninteractive
-apt update && apt install wget -y
-download_code &
-setup_ssh &
-setup_envs
-wait
-setup_code &
-wait
-"""
-
-
-@dataclass
-class SubModule:
- """Represent RO Submodules."""
- sub_module_path: str
- container_path: str
-
-
-class HostPath:
- """Represents a hostpath."""
- def __init__(self, config: str, container_path: str, submodules: dict = None) -> None:
- mount_path_items = config.split("-")
- mount_path_items.reverse()
- self.mount_path = "/" + "/".join(mount_path_items)
- self.config = config
- self.sub_module_dict = {}
- if submodules:
- for submodule in submodules.keys():
- self.sub_module_dict[submodule] = SubModule(
- sub_module_path=self.mount_path + "/" + submodule,
- container_path=submodules[submodule],
- )
- else:
- self.container_path = container_path
- self.module_name = container_path.split("/")[-1]
-
-class DebugMode(Object):
- """Class to handle the debug-mode."""
-
- def __init__(
- self,
- charm: CharmBase,
- stored: StoredState,
- container: Container,
- hostpaths: List[HostPath] = [],
- vscode_workspace_path: str = "files/vscode-workspace.json",
- ) -> None:
- super().__init__(charm, "debug-mode")
-
- self.charm = charm
- self._stored = stored
- self.hostpaths = hostpaths
- self.vscode_workspace = Path(vscode_workspace_path).read_text()
- self.container = container
-
- self._stored.set_default(
- debug_mode_started=False,
- debug_mode_vscode_command=None,
- debug_mode_password=None,
- )
-
- self.framework.observe(self.charm.on.config_changed, self._on_config_changed)
- self.framework.observe(self.charm.on[container.name].pebble_ready, self._on_config_changed)
- self.framework.observe(self.charm.on.update_status, self._on_update_status)
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- if not self.charm.unit.is_leader():
- return
-
- debug_mode_enabled = self.charm.config.get("debug-mode", False)
- action = self.enable if debug_mode_enabled else self.disable
- action()
-
- def _on_update_status(self, _) -> None:
- """Handler for the update-status event."""
- if not self.charm.unit.is_leader() or not self.started:
- return
-
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- @property
- def started(self) -> bool:
- """Indicates whether the debug-mode has started or not."""
- return self._stored.debug_mode_started
-
- @property
- def command(self) -> str:
- """Command to launch vscode."""
- return self._stored.debug_mode_vscode_command
-
- @property
- def password(self) -> str:
- """SSH password."""
- return self._stored.debug_mode_password
-
- def enable(self, service_name: str = None) -> None:
- """Enable debug-mode.
-
- This function mounts hostpaths of the OSM modules (if set), and
- configures the container so it can be easily debugged. The setup
- includes the configuration of SSH, environment variables, and
- VSCode workspace and plugins.
-
- Args:
- service_name (str, optional): Pebble service name which has the desired environment
- variables. Mandatory if there is more than one Pebble service configured.
- """
- hostpaths_to_reconfigure = self._hostpaths_to_reconfigure()
- if self.started and not hostpaths_to_reconfigure:
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
- return
-
- logger.debug("enabling debug-mode")
-
- # Mount hostpaths if set.
- # If hostpaths are mounted, the statefulset will be restarted,
- # and for that reason we return immediately. On restart, the hostpaths
- # won't be mounted and then we can continue and setup the debug-mode.
- if hostpaths_to_reconfigure:
- self.charm.unit.status = MaintenanceStatus("debug-mode: configuring hostpaths")
- self._configure_hostpaths(hostpaths_to_reconfigure)
- return
-
- self.charm.unit.status = MaintenanceStatus("debug-mode: starting")
- password = secrets.token_hex(8)
- self._setup_debug_mode(
- password,
- service_name,
- mounted_hostpaths=[hp for hp in self.hostpaths if self.charm.config.get(hp.config)],
- )
-
- self._stored.debug_mode_vscode_command = self._get_vscode_command(get_pod_ip())
- self._stored.debug_mode_password = password
- self._stored.debug_mode_started = True
- logger.info("debug-mode is ready")
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- def disable(self) -> None:
- """Disable debug-mode."""
- logger.debug("disabling debug-mode")
- current_status = self.charm.unit.status
- hostpaths_unmounted = self._unmount_hostpaths()
-
- if not self._stored.debug_mode_started:
- return
- self._stored.debug_mode_started = False
- self._stored.debug_mode_vscode_command = None
- self._stored.debug_mode_password = None
-
- if not hostpaths_unmounted:
- self.charm.unit.status = current_status
- self._restart()
-
- def _hostpaths_to_reconfigure(self) -> List[HostPath]:
- hostpaths_to_reconfigure: List[HostPath] = []
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
- volumes = statefulset.spec.template.spec.volumes
-
- for hostpath in self.hostpaths:
- hostpath_is_set = True if self.charm.config.get(hostpath.config) else False
- hostpath_already_configured = next(
- (True for volume in volumes if volume.name == hostpath.config), False
- )
- if hostpath_is_set != hostpath_already_configured:
- hostpaths_to_reconfigure.append(hostpath)
-
- return hostpaths_to_reconfigure
-
- def _setup_debug_mode(
- self,
- password: str,
- service_name: str = None,
- mounted_hostpaths: List[HostPath] = [],
- ) -> None:
- services = self.container.get_plan().services
- if not service_name and len(services) != 1:
- raise Exception("Cannot start debug-mode: please set the service_name")
-
- service = None
- if not service_name:
- service_name, service = services.popitem()
- if not service:
- service = services.get(service_name)
-
- logger.debug(f"getting environment variables from service {service_name}")
- environment = service.environment
- environment_file_content = "\n".join(
- [f'export {key}="{value}"' for key, value in environment.items()]
- )
- logger.debug(f"pushing environment file to {self.container.name} container")
- self.container.push("/debug.envs", environment_file_content)
-
- # Push VSCode workspace
- logger.debug(f"pushing vscode workspace to {self.container.name} container")
- self.container.push("/debug.code-workspace", self.vscode_workspace)
-
- # Execute debugging script
- logger.debug(f"pushing debug-mode setup script to {self.container.name} container")
- self.container.push("/debug.sh", _DEBUG_SCRIPT.format(password), permissions=0o777)
- logger.debug(f"executing debug-mode setup script in {self.container.name} container")
- self.container.exec(["/debug.sh"]).wait_output()
- logger.debug(f"stopping service {service_name} in {self.container.name} container")
- self.container.stop(service_name)
-
- # Add symlinks to mounted hostpaths
- for hostpath in mounted_hostpaths:
- logger.debug(f"adding symlink for {hostpath.config}")
- if len(hostpath.sub_module_dict) > 0:
- for sub_module in hostpath.sub_module_dict.keys():
- self.container.exec(["rm", "-rf", hostpath.sub_module_dict[sub_module].container_path]).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- hostpath.sub_module_dict[sub_module].sub_module_path,
- hostpath.sub_module_dict[sub_module].container_path,
- ]
- )
-
- else:
- self.container.exec(["rm", "-rf", hostpath.container_path]).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- f"{hostpath.mount_path}/{hostpath.module_name}",
- hostpath.container_path,
- ]
- )
-
- def _configure_hostpaths(self, hostpaths: List[HostPath]):
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in hostpaths:
- if self.charm.config.get(hostpath.config):
- self._add_hostpath_to_statefulset(hostpath, statefulset)
- else:
- self._delete_hostpath_from_statefulset(hostpath, statefulset)
-
- client.replace(statefulset)
-
- def _unmount_hostpaths(self) -> bool:
- client = Client()
- hostpath_unmounted = False
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in self.hostpaths:
- if self._delete_hostpath_from_statefulset(hostpath, statefulset):
- hostpath_unmounted = True
-
- if hostpath_unmounted:
- client.replace(statefulset)
-
- return hostpath_unmounted
-
- def _add_hostpath_to_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- # Add volume
- logger.debug(f"adding volume {hostpath.config} to {self.charm.app.name} statefulset")
- volume = Volume(
- hostpath.config,
- hostPath=HostPathVolumeSource(
- path=self.charm.config[hostpath.config],
- type="Directory",
- ),
- )
- statefulset.spec.template.spec.volumes.append(volume)
-
- # Add volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
-
- logger.debug(
- f"adding volumeMount {hostpath.config} to {self.container.name} container"
- )
- statefulset_container.volumeMounts.append(
- VolumeMount(mountPath=hostpath.mount_path, name=hostpath.config)
- )
-
- def _delete_hostpath_from_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- hostpath_unmounted = False
- for volume in statefulset.spec.template.spec.volumes:
-
- if hostpath.config != volume.name:
- continue
-
- # Remove volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
- for volume_mount in statefulset_container.volumeMounts:
- if volume_mount.name != hostpath.config:
- continue
-
- logger.debug(
- f"removing volumeMount {hostpath.config} from {self.container.name} container"
- )
- statefulset_container.volumeMounts.remove(volume_mount)
-
- # Remove volume
- logger.debug(
- f"removing volume {hostpath.config} from {self.charm.app.name} statefulset"
- )
- statefulset.spec.template.spec.volumes.remove(volume)
-
- hostpath_unmounted = True
- return hostpath_unmounted
-
- def _get_vscode_command(
- self,
- pod_ip: str,
- user: str = "root",
- workspace_path: str = "/debug.code-workspace",
- ) -> str:
- return f"code --remote ssh-remote+{user}@{pod_ip} {workspace_path}"
-
- def _restart(self):
- self.container.exec(["kill", "-HUP", "1"])
diff --git a/installers/charm/juju-simplestreams-operator/metadata.yaml b/installers/charm/juju-simplestreams-operator/metadata.yaml
deleted file mode 100644
index 03b9aa68..00000000
--- a/installers/charm/juju-simplestreams-operator/metadata.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Overview on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-name: osm-juju-simplestreams
-
-# The following metadata are human-readable and will be published prominently on Charmhub.
-
-display-name: Juju simplestreams
-
-summary: Basic http server exposing simplestreams for juju
-
-description: |
- TODO
-
-containers:
- server:
- resource: server-image
-
-# This file populates the Resources tab on Charmhub.
-
-resources:
- server-image:
- type: oci-image
- description: OCI image for server
- upstream-source: nginx:1.23.0
-
-peers:
- peer:
- interface: peer
-
-requires:
- ingress:
- interface: ingress
- limit: 1
diff --git a/installers/charm/juju-simplestreams-operator/pyproject.toml b/installers/charm/juju-simplestreams-operator/pyproject.toml
deleted file mode 100644
index 16cf0f4b..00000000
--- a/installers/charm/juju-simplestreams-operator/pyproject.toml
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-# Testing tools configuration
-[tool.coverage.run]
-branch = true
-
-[tool.coverage.report]
-show_missing = true
-
-[tool.pytest.ini_options]
-minversion = "6.0"
-log_cli_level = "INFO"
-
-# Formatting tools configuration
-[tool.black]
-line-length = 99
-target-version = ["py38"]
-
-[tool.isort]
-profile = "black"
-
-# Linting tools configuration
-[tool.flake8]
-max-line-length = 99
-max-doc-length = 99
-max-complexity = 10
-exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
-select = ["E", "W", "F", "C", "N", "R", "D", "H"]
-# Ignore W503, E501 because using black creates errors with this
-# Ignore D107 Missing docstring in __init__
-ignore = ["W503", "E501", "D107"]
-# D100, D101, D102, D103: Ignore missing docstrings in tests
-per-file-ignores = ["tests/*:D100,D101,D102,D103,D104"]
-docstring-convention = "google"
diff --git a/installers/charm/juju-simplestreams-operator/requirements.txt b/installers/charm/juju-simplestreams-operator/requirements.txt
deleted file mode 100644
index 398d4ad3..00000000
--- a/installers/charm/juju-simplestreams-operator/requirements.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-ops < 2.2
-lightkube
-lightkube-models
-# git+https://github.com/charmed-osm/config-validator/
diff --git a/installers/charm/juju-simplestreams-operator/src/charm.py b/installers/charm/juju-simplestreams-operator/src/charm.py
deleted file mode 100755
index 555aab00..00000000
--- a/installers/charm/juju-simplestreams-operator/src/charm.py
+++ /dev/null
@@ -1,249 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# Learn more at: https://juju.is/docs/sdk
-
-"""Juju simpletreams charm."""
-
-import logging
-import subprocess
-from dataclasses import dataclass
-from pathlib import Path
-from typing import Any, Dict
-
-from charms.nginx_ingress_integrator.v0.ingress import IngressRequires
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from charms.osm_libs.v0.utils import (
- CharmError,
- check_container_ready,
- check_service_active,
-)
-from lightkube.models.core_v1 import ServicePort
-from ops.charm import ActionEvent, CharmBase
-from ops.main import main
-from ops.model import ActiveStatus, Container
-
-SERVICE_PORT = 8080
-
-logger = logging.getLogger(__name__)
-container_name = "server"
-
-
-@dataclass
-class ImageMetadata:
- """Image Metadata."""
-
- region: str
- auth_url: str
- image_id: str
- series: str
-
-
-class JujuSimplestreamsCharm(CharmBase):
- """Simplestreams Kubernetes sidecar charm."""
-
- def __init__(self, *args):
- super().__init__(*args)
- self.ingress = IngressRequires(
- self,
- {
- "service-hostname": self.external_hostname,
- "service-name": self.app.name,
- "service-port": SERVICE_PORT,
- },
- )
- event_handler_mapping = {
- # Core lifecycle events
- self.on["server"].pebble_ready: self._on_server_pebble_ready,
- self.on.update_status: self._on_update_status,
- self.on["peer"].relation_changed: self._push_image_metadata_from_relation,
- # Action events
- self.on["add-image-metadata"].action: self._on_add_image_metadata_action,
- }
-
- for event, handler in event_handler_mapping.items():
- self.framework.observe(event, handler)
-
- port = ServicePort(SERVICE_PORT, name=f"{self.app.name}")
- self.service_patcher = KubernetesServicePatch(self, [port])
- self.container: Container = self.unit.get_container(container_name)
- self.unit.set_workload_version(self.unit.name)
-
- @property
- def external_hostname(self) -> str:
- """External hostname property.
-
- Returns:
- str: the external hostname from config.
- If not set, return the ClusterIP service name.
- """
- return self.config.get("external-hostname") or self.app.name
-
- # ---------------------------------------------------------------------------
- # Handlers for Charm Events
- # ---------------------------------------------------------------------------
-
- def _on_server_pebble_ready(self, _) -> None:
- """Handler for the config-changed event."""
- try:
- self._push_configuration()
- self._configure_service()
- self._push_image_metadata_from_relation()
- # Update charm status
- self._on_update_status()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _on_update_status(self, _=None) -> None:
- """Handler for the update-status event."""
- try:
- check_container_ready(self.container)
- check_service_active(self.container, container_name)
- self.unit.status = ActiveStatus()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _push_image_metadata_from_relation(self, _=None):
- subprocess.run(["rm", "-rf", "/tmp/simplestreams"])
- subprocess.run(["mkdir", "-p", "/tmp/simplestreams"])
- image_metadata_dict = self._get_image_metadata_from_relation()
- for image_metadata in image_metadata_dict.values():
- subprocess.run(
- [
- "files/juju-metadata",
- "generate-image",
- "-d",
- "/tmp/simplestreams",
- "-i",
- image_metadata.image_id,
- "-s",
- image_metadata.series,
- "-r",
- image_metadata.region,
- "-u",
- image_metadata.auth_url,
- ]
- )
- subprocess.run(["chmod", "555", "-R", "/tmp/simplestreams"])
- self.container.push_path("/tmp/simplestreams", "/app/static")
-
- def _on_add_image_metadata_action(self, event: ActionEvent):
- relation = self.model.get_relation("peer")
- try:
- if not relation:
- raise Exception("charm has not been fully initialized. Try again later.")
- if not self.unit.is_leader():
- raise Exception("I am not the leader!")
- if any(
- prohibited_char in param_value
- for prohibited_char in ",; "
- for param_value in event.params.values()
- ):
- event.fail("invalid params")
- return
-
- image_metadata_dict = self._get_image_metadata_from_relation()
-
- new_image_metadata = ImageMetadata(
- region=event.params["region"],
- auth_url=event.params["auth-url"],
- image_id=event.params["image-id"],
- series=event.params["series"],
- )
-
- image_metadata_dict[event.params["image-id"]] = new_image_metadata
-
- new_relation_data = []
- for image_metadata in image_metadata_dict.values():
- new_relation_data.append(
- f"{image_metadata.image_id};{image_metadata.series};{image_metadata.region};{image_metadata.auth_url}"
- )
- relation.data[self.app]["data"] = ",".join(new_relation_data)
- except Exception as e:
- event.fail(f"Action failed: {e}")
- logger.error(f"Action failed: {e}")
-
- # ---------------------------------------------------------------------------
- # Validation and configuration and more
- # ---------------------------------------------------------------------------
-
- def _get_image_metadata_from_relation(self) -> Dict[str, ImageMetadata]:
- if not (relation := self.model.get_relation("peer")):
- return {}
-
- image_metadata_dict: Dict[str, ImageMetadata] = {}
-
- relation_data = relation.data[self.app].get("data", "")
- if relation_data:
- for image_metadata_string in relation_data.split(","):
- image_id, series, region, auth_url = image_metadata_string.split(";")
- image_metadata_dict[image_id] = ImageMetadata(
- region=region,
- auth_url=auth_url,
- image_id=image_id,
- series=series,
- )
-
- return image_metadata_dict
-
- def _configure_service(self) -> None:
- """Add Pebble layer with the ro service."""
- logger.debug(f"configuring {self.app.name} service")
- self.container.add_layer(container_name, self._get_layer(), combine=True)
- self.container.replan()
-
- def _push_configuration(self) -> None:
- """Push nginx configuration to the container."""
- self.container.push("/etc/nginx/nginx.conf", Path("files/nginx.conf").read_text())
- self.container.make_dir("/app/static", make_parents=True)
-
- def _update_ingress_config(self) -> None:
- """Update ingress config in relation."""
- ingress_config = {
- "service-hostname": self.external_hostname,
- "max-body-size": self.config["max-body-size"],
- }
- if "tls-secret-name" in self.config:
- ingress_config["tls-secret-name"] = self.config["tls-secret-name"]
- logger.debug(f"updating ingress-config: {ingress_config}")
- self.ingress.update_config(ingress_config)
-
- def _get_layer(self) -> Dict[str, Any]:
- """Get layer for Pebble."""
- return {
- "summary": "server layer",
- "description": "pebble config layer for server",
- "services": {
- container_name: {
- "override": "replace",
- "summary": "server service",
- "command": 'nginx -g "daemon off;"',
- "startup": "enabled",
- }
- },
- }
-
-
-if __name__ == "__main__": # pragma: no cover
- main(JujuSimplestreamsCharm)
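
The charm above serialises the image metadata into the peer relation as a single
comma-separated string of `image-id;series;region;auth-url` entries, which is why the action
rejects parameters containing `,`, `;` or spaces. A standalone sketch of that round trip,
mirroring `_on_add_image_metadata_action` and `_get_image_metadata_from_relation`:

```python
from dataclasses import dataclass
from typing import Dict


@dataclass
class ImageMetadata:
    region: str
    auth_url: str
    image_id: str
    series: str


def serialize(images: Dict[str, ImageMetadata]) -> str:
    # One entry per image; fields joined by ";" and entries joined by ",".
    return ",".join(
        f"{m.image_id};{m.series};{m.region};{m.auth_url}" for m in images.values()
    )


def deserialize(data: str) -> Dict[str, ImageMetadata]:
    images: Dict[str, ImageMetadata] = {}
    for entry in data.split(","):
        image_id, series, region, auth_url = entry.split(";")
        images[image_id] = ImageMetadata(region, auth_url, image_id, series)
    return images


original = {
    "id": ImageMetadata(region="microstack", auth_url="localhost", image_id="id", series="focal")
}
assert deserialize(serialize(original)) == original
```
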
diff --git a/installers/charm/juju-simplestreams-operator/tests/unit/test_charm.py b/installers/charm/juju-simplestreams-operator/tests/unit/test_charm.py
deleted file mode 100644
index 0273352e..00000000
--- a/installers/charm/juju-simplestreams-operator/tests/unit/test_charm.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# Learn more about testing at: https://juju.is/docs/sdk/testing
-
-import pytest
-from ops.model import ActiveStatus
-from ops.testing import Harness
-from pytest_mock import MockerFixture
-
-from charm import JujuSimplestreamsCharm
-
-container_name = "server"
-service_name = "server"
-
-
-@pytest.fixture
-def harness(mocker: MockerFixture):
- mocker.patch("charm.KubernetesServicePatch", lambda x, y: None)
- harness = Harness(JujuSimplestreamsCharm)
- harness.begin()
- harness.charm.container.make_dir("/etc/nginx", make_parents=True)
- yield harness
- harness.cleanup()
-
-
-def test_ready(harness: Harness):
- harness.charm.on.server_pebble_ready.emit(container_name)
- assert harness.charm.unit.status == ActiveStatus()
-
-
-def test_add_metadata_action(harness: Harness, mocker: MockerFixture):
- harness.set_leader(True)
- remote_unit = f"{harness.charm.app.name}/1"
- relation_id = harness.add_relation("peer", harness.charm.app.name)
- harness.add_relation_unit(relation_id, remote_unit)
- event = mocker.Mock()
- event.params = {
- "region": "microstack",
- "auth-url": "localhost",
- "image-id": "id",
- "series": "focal",
- }
- harness.charm._on_add_image_metadata_action(event)
-    # Harness does not emit the relation-changed event when the action handler
-    # updates the application data in the peer relation, so emit it manually here:
- relation = harness.charm.model.get_relation("peer")
- harness.charm.on["peer"].relation_changed.emit(relation)
- assert harness.charm.container.exists("/app/static/simplestreams/images/streams/v1/index.json")
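
As an aside, the manual emit above is only needed because the relation data is written from
inside the action handler. A test that drives the peer data through `Harness.update_relation_data`
receives the relation-changed event automatically; a sketch under that assumption, reusing the
`harness` fixture and the `image-id;series;region;auth-url` entry format from above:

```python
def test_relation_changed_via_update_relation_data(harness: Harness):
    harness.set_leader(True)
    relation_id = harness.add_relation("peer", harness.charm.app.name)
    # update_relation_data emits relation-changed itself, so no manual emit is needed.
    harness.update_relation_data(
        relation_id,
        harness.charm.app.name,
        {"data": "id;focal;microstack;localhost"},
    )
    assert harness.charm.container.exists(
        "/app/static/simplestreams/images/streams/v1/index.json"
    )
```
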
diff --git a/installers/charm/juju-simplestreams-operator/tox.ini b/installers/charm/juju-simplestreams-operator/tox.ini
deleted file mode 100644
index 0268da8a..00000000
--- a/installers/charm/juju-simplestreams-operator/tox.ini
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-[tox]
-skipsdist=True
-skip_missing_interpreters = True
-envlist = lint, unit
-
-[vars]
-src_path = {toxinidir}/src/
-tst_path = {toxinidir}/tests/
-all_path = {[vars]src_path} {[vars]tst_path}
-
-[testenv]
-setenv =
- PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path}
- PYTHONBREAKPOINT=ipdb.set_trace
- PY_COLORS=1
-passenv =
- PYTHONPATH
- CHARM_BUILD_DIR
- MODEL_SETTINGS
-
-[testenv:fmt]
-description = Apply coding style standards to code
-deps =
- black
- isort
-commands =
- isort {[vars]all_path}
- black {[vars]all_path}
-
-[testenv:lint]
-description = Check code against coding style standards
-deps =
- black
- flake8
- flake8-docstrings
- flake8-builtins
- pyproject-flake8
- pep8-naming
- isort
- codespell
-commands =
- codespell {toxinidir}/. --skip {toxinidir}/.git --skip {toxinidir}/.tox \
- --skip {toxinidir}/build --skip {toxinidir}/lib --skip {toxinidir}/venv \
- --skip {toxinidir}/.mypy_cache --skip {toxinidir}/icon.svg
- # pflake8 wrapper supports config from pyproject.toml
- pflake8 {[vars]all_path}
- isort --check-only --diff {[vars]all_path}
- black --check --diff {[vars]all_path}
-
-[testenv:unit]
-description = Run unit tests
-deps =
- pytest
- pytest-mock
- coverage[toml]
- -r{toxinidir}/requirements.txt
-commands =
- coverage run --source={[vars]src_path} \
- -m pytest --ignore={[vars]tst_path}integration -v --tb native -s {posargs}
- coverage report
- coverage xml
-
-[testenv:integration]
-description = Run integration tests
-deps =
- pytest
- juju
- pytest-operator
- -r{toxinidir}/requirements.txt
-commands =
- pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs}
diff --git a/installers/charm/kafka-exporter/.gitignore b/installers/charm/kafka-exporter/.gitignore
deleted file mode 100644
index 2885df27..00000000
--- a/installers/charm/kafka-exporter/.gitignore
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-venv
-.vscode
-build
-*.charm
-.coverage
-coverage.xml
-.stestr
-cover
-release
\ No newline at end of file
diff --git a/installers/charm/kafka-exporter/.jujuignore b/installers/charm/kafka-exporter/.jujuignore
deleted file mode 100644
index 3ae3e7dc..00000000
--- a/installers/charm/kafka-exporter/.jujuignore
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-venv
-.vscode
-build
-*.charm
-.coverage
-coverage.xml
-.gitignore
-.stestr
-cover
-release
-tests/
-requirements*
-tox.ini
diff --git a/installers/charm/kafka-exporter/.yamllint.yaml b/installers/charm/kafka-exporter/.yamllint.yaml
deleted file mode 100644
index d71fb69f..00000000
--- a/installers/charm/kafka-exporter/.yamllint.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
----
-extends: default
-
-yaml-files:
- - "*.yaml"
- - "*.yml"
- - ".yamllint"
-ignore: |
- .tox
- cover/
- build/
- venv
- release/
diff --git a/installers/charm/kafka-exporter/README.md b/installers/charm/kafka-exporter/README.md
deleted file mode 100644
index ae9babf4..00000000
--- a/installers/charm/kafka-exporter/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-# Prometheus kafka exporter operator Charm for Kubernetes
-
-## Requirements
diff --git a/installers/charm/kafka-exporter/charmcraft.yaml b/installers/charm/kafka-exporter/charmcraft.yaml
deleted file mode 100644
index 0a285a9d..00000000
--- a/installers/charm/kafka-exporter/charmcraft.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-type: charm
-bases:
- - build-on:
- - name: ubuntu
- channel: "20.04"
- architectures: ["amd64"]
- run-on:
- - name: ubuntu
- channel: "20.04"
- architectures:
- - amd64
- - aarch64
- - arm64
-parts:
- charm:
- build-packages: [git]
diff --git a/installers/charm/kafka-exporter/config.yaml b/installers/charm/kafka-exporter/config.yaml
deleted file mode 100644
index 59313360..00000000
--- a/installers/charm/kafka-exporter/config.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-options:
- ingress_class:
- type: string
- description: |
- Ingress class name. This is useful for selecting the ingress to be used
- when there are multiple ingress controllers in the underlying k8s cluster.
- ingress_whitelist_source_range:
- type: string
- description: |
- A comma-separated list of CIDRs to store in the
- ingress.kubernetes.io/whitelist-source-range annotation.
-
- This can be used to lock down access to
- the Kafka Exporter based on source IP address.
- default: ""
- tls_secret_name:
- type: string
- description: TLS Secret name
- default: ""
- site_url:
- type: string
- description: Ingress URL
- default: ""
- cluster_issuer:
- type: string
- description: Name of the cluster issuer for TLS certificates
- default: ""
- image_pull_policy:
- type: string
- description: |
- ImagePullPolicy configuration for the pod.
- Possible values: always, ifnotpresent, never
- default: always
- security_context:
- description: Enables the security context of the pods
- type: boolean
- default: false
- kafka_endpoint:
- description: Host and port of Kafka in the format <host>:<port>
- type: string
diff --git a/installers/charm/kafka-exporter/lib/charms/kafka_k8s/v0/kafka.py b/installers/charm/kafka-exporter/lib/charms/kafka_k8s/v0/kafka.py
deleted file mode 100644
index 1baf9a88..00000000
--- a/installers/charm/kafka-exporter/lib/charms/kafka_k8s/v0/kafka.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Kafka library.
-
-This [library](https://juju.is/docs/sdk/libraries) implements both sides of the
-`kafka` [interface](https://juju.is/docs/sdk/relations).
-
-The *provider* side of this interface is implemented by the
-[kafka-k8s Charmed Operator](https://charmhub.io/kafka-k8s).
-
-Any Charmed Operator that *requires* Kafka for providing its
-service should implement the *requirer* side of this interface.
-
-In a nutshell using this library to implement a Charmed Operator *requiring*
-Kafka would look like
-
-```
-$ charmcraft fetch-lib charms.kafka_k8s.v0.kafka
-```
-
-`metadata.yaml`:
-
-```
-requires:
- kafka:
- interface: kafka
- limit: 1
-```
-
-`src/charm.py`:
-
-```
-from charms.kafka_k8s.v0.kafka import KafkaEvents, KafkaRequires
-from ops.charm import CharmBase
-
-
-class MyCharm(CharmBase):
-
- on = KafkaEvents()
-
- def __init__(self, *args):
- super().__init__(*args)
- self.kafka = KafkaRequires(self)
- self.framework.observe(
- self.on.kafka_available,
- self._on_kafka_available,
- )
- self.framework.observe(
- self.on.kafka_broken,
- self._on_kafka_broken,
- )
-
- def _on_kafka_available(self, event):
- # Get Kafka host and port
- host: str = self.kafka.host
- port: int = self.kafka.port
- # host => "kafka-k8s"
- # port => 9092
-
- def _on_kafka_broken(self, event):
- # Stop service
- # ...
- self.unit.status = BlockedStatus("need kafka relation")
-```
-
-You can file bugs
-[here](https://github.com/charmed-osm/kafka-k8s-operator/issues)!
-"""
-
-from typing import Optional
-
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, Object
-
-# The unique Charmhub library identifier, never change it
-from ops.model import Relation
-
-LIBID = "eacc8c85082347c9aae740e0220b8376"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 3
-
-
-KAFKA_HOST_APP_KEY = "host"
-KAFKA_PORT_APP_KEY = "port"
-
-
-class _KafkaAvailableEvent(EventBase):
- """Event emitted when Kafka is available."""
-
-
-class _KafkaBrokenEvent(EventBase):
- """Event emitted when Kafka relation is broken."""
-
-
-class KafkaEvents(CharmEvents):
- """Kafka events.
-
- This class defines the events that Kafka can emit.
-
- Events:
- kafka_available (_KafkaAvailableEvent)
- kafka_broken (_KafkaBrokenEvent)
- """
-
- kafka_available = EventSource(_KafkaAvailableEvent)
- kafka_broken = EventSource(_KafkaBrokenEvent)
-
-
-class KafkaRequires(Object):
- """Requires-side of the Kafka relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "kafka") -> None:
- super().__init__(charm, endpoint_name)
- self.charm = charm
- self._endpoint_name = endpoint_name
-
- # Observe relation events
- event_observe_mapping = {
- charm.on[self._endpoint_name].relation_changed: self._on_relation_changed,
- charm.on[self._endpoint_name].relation_broken: self._on_relation_broken,
- }
- for event, observer in event_observe_mapping.items():
- self.framework.observe(event, observer)
-
- def _on_relation_changed(self, event) -> None:
- if event.relation.app and all(
- key in event.relation.data[event.relation.app]
- for key in (KAFKA_HOST_APP_KEY, KAFKA_PORT_APP_KEY)
- ):
- self.charm.on.kafka_available.emit()
-
- def _on_relation_broken(self, _) -> None:
- self.charm.on.kafka_broken.emit()
-
- @property
- def host(self) -> str:
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- relation.data[relation.app].get(KAFKA_HOST_APP_KEY)
- if relation and relation.app
- else None
- )
-
- @property
- def port(self) -> int:
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- int(relation.data[relation.app].get(KAFKA_PORT_APP_KEY))
- if relation and relation.app
- else None
- )
-
-
-class KafkaProvides(Object):
- """Provides-side of the Kafka relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "kafka") -> None:
- super().__init__(charm, endpoint_name)
- self._endpoint_name = endpoint_name
-
- def set_host_info(self, host: str, port: int, relation: Optional[Relation] = None) -> None:
- """Set Kafka host and port.
-
- This function writes in the application data of the relation, therefore,
- only the unit leader can call it.
-
- Args:
- host (str): Kafka hostname or IP address.
- port (int): Kafka port.
- relation (Optional[Relation]): Relation to update.
- If not specified, all relations will be updated.
-
- Raises:
- Exception: if a non-leader unit calls this function.
- """
- if not self.model.unit.is_leader():
- raise Exception("only the leader can set host information.")
-
- if relation:
- self._update_relation_data(host, port, relation)
- return
-
- for relation in self.model.relations[self._endpoint_name]:
- self._update_relation_data(host, port, relation)
-
- def _update_relation_data(self, host: str, port: int, relation: Relation) -> None:
- """Update data in relation if needed."""
- relation.data[self.model.app][KAFKA_HOST_APP_KEY] = host
- relation.data[self.model.app][KAFKA_PORT_APP_KEY] = str(port)
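
For completeness, the provider side of this interface mirrors the requirer shown in the library docstring above. A minimal sketch, assuming a hypothetical charm (`MyKafkaCharm`) that serves Kafka on port 9092 and declares a `kafka` endpoint under `provides`:

```python
# Hypothetical provider-side charm; the class name, port and endpoint are assumptions.
from charms.kafka_k8s.v0.kafka import KafkaProvides
from ops.charm import CharmBase
from ops.main import main


class MyKafkaCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        self.kafka = KafkaProvides(self)
        self.framework.observe(
            self.on["kafka"].relation_joined, self._on_kafka_relation_joined
        )

    def _on_kafka_relation_joined(self, event):
        # set_host_info() writes application relation data, so only the leader may call it.
        if self.unit.is_leader():
            self.kafka.set_host_info(self.app.name, 9092, event.relation)


if __name__ == "__main__":
    main(MyKafkaCharm)
```
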
diff --git a/installers/charm/kafka-exporter/metadata.yaml b/installers/charm/kafka-exporter/metadata.yaml
deleted file mode 100644
index a70b3b68..00000000
--- a/installers/charm/kafka-exporter/metadata.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-name: kafka-exporter-k8s
-summary: OSM Prometheus Kafka Exporter
-description: |
- A CAAS charm to deploy OSM's Prometheus Kafka Exporter.
-series:
- - kubernetes
-tags:
- - kubernetes
- - osm
- - prometheus
- - kafka-exporter
-min-juju-version: 2.8.0
-deployment:
- type: stateless
- service: cluster
-resources:
- image:
- type: oci-image
- description: Image of kafka-exporter
- upstream-source: "bitnami/kafka-exporter:1.4.2"
-requires:
- kafka:
- interface: kafka
-provides:
- prometheus-scrape:
- interface: prometheus
- grafana-dashboard:
- interface: grafana-dashboard
diff --git a/installers/charm/kafka-exporter/requirements-test.txt b/installers/charm/kafka-exporter/requirements-test.txt
deleted file mode 100644
index 316f6d20..00000000
--- a/installers/charm/kafka-exporter/requirements-test.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-mock==4.0.3
diff --git a/installers/charm/kafka-exporter/requirements.txt b/installers/charm/kafka-exporter/requirements.txt
deleted file mode 100644
index 8bb93ad3..00000000
--- a/installers/charm/kafka-exporter/requirements.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-git+https://github.com/charmed-osm/ops-lib-charmed-osm/@master
diff --git a/installers/charm/kafka-exporter/src/charm.py b/installers/charm/kafka-exporter/src/charm.py
deleted file mode 100755
index 07a854fd..00000000
--- a/installers/charm/kafka-exporter/src/charm.py
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-# pylint: disable=E0213
-
-from ipaddress import ip_network
-import logging
-from pathlib import Path
-from typing import NoReturn, Optional
-from urllib.parse import urlparse
-
-from charms.kafka_k8s.v0.kafka import KafkaEvents, KafkaRequires
-from ops.main import main
-from opslib.osm.charm import CharmedOsmBase, RelationsMissing
-from opslib.osm.interfaces.grafana import GrafanaDashboardTarget
-from opslib.osm.interfaces.prometheus import PrometheusScrapeTarget
-from opslib.osm.pod import (
- ContainerV3Builder,
- IngressResourceV3Builder,
- PodSpecV3Builder,
-)
-from opslib.osm.validator import ModelValidator, validator
-
-
-logger = logging.getLogger(__name__)
-
-PORT = 9308
-
-
-class ConfigModel(ModelValidator):
- site_url: Optional[str]
- cluster_issuer: Optional[str]
- ingress_class: Optional[str]
- ingress_whitelist_source_range: Optional[str]
- tls_secret_name: Optional[str]
- image_pull_policy: str
- security_context: bool
- kafka_endpoint: Optional[str]
-
- @validator("site_url")
- def validate_site_url(cls, v):
- if v:
- parsed = urlparse(v)
- if not parsed.scheme.startswith("http"):
- raise ValueError("value must start with http")
- return v
-
- @validator("ingress_whitelist_source_range")
- def validate_ingress_whitelist_source_range(cls, v):
- if v:
- ip_network(v)
- return v
-
- @validator("image_pull_policy")
- def validate_image_pull_policy(cls, v):
- values = {
- "always": "Always",
- "ifnotpresent": "IfNotPresent",
- "never": "Never",
- }
- v = v.lower()
- if v not in values.keys():
- raise ValueError("value must be always, ifnotpresent or never")
- return values[v]
-
- @validator("kafka_endpoint")
- def validate_kafka_endpoint(cls, v):
- if v and len(v.split(":")) != 2:
- raise ValueError("value must be in the format <host>:<port>")
- return v
-
-
-class KafkaEndpoint:
- def __init__(self, host: str, port: str) -> None:
- self.host = host
- self.port = port
-
-
-class KafkaExporterCharm(CharmedOsmBase):
- on = KafkaEvents()
-
- def __init__(self, *args) -> NoReturn:
- super().__init__(*args, oci_image="image")
-
- # Provision Kafka relation to exchange information
- self.kafka = KafkaRequires(self)
- self.framework.observe(self.on.kafka_available, self.configure_pod)
- self.framework.observe(self.on.kafka_broken, self.configure_pod)
-
- # Register relation to provide a Scraping Target
- self.scrape_target = PrometheusScrapeTarget(self, "prometheus-scrape")
- self.framework.observe(
- self.on["prometheus-scrape"].relation_joined, self._publish_scrape_info
- )
-
- # Register relation to provide a Dashboard Target
- self.dashboard_target = GrafanaDashboardTarget(self, "grafana-dashboard")
- self.framework.observe(
- self.on["grafana-dashboard"].relation_joined, self._publish_dashboard_info
- )
-
- def _publish_scrape_info(self, event) -> NoReturn:
- """Publishes scraping information for Prometheus.
-
- Args:
- event (EventBase): Prometheus relation event.
- """
- if self.unit.is_leader():
- hostname = (
- urlparse(self.model.config["site_url"]).hostname
- if self.model.config["site_url"]
- else self.model.app.name
- )
- port = str(PORT)
- if self.model.config.get("site_url", "").startswith("https://"):
- port = "443"
- elif self.model.config.get("site_url", "").startswith("http://"):
- port = "80"
-
- self.scrape_target.publish_info(
- hostname=hostname,
- port=port,
- metrics_path="/metrics",
- scrape_interval="30s",
- scrape_timeout="15s",
- )
-
- def _publish_dashboard_info(self, event) -> NoReturn:
- """Publish dashboards for Grafana.
-
- Args:
- event (EventBase): Grafana relation event.
- """
- if self.unit.is_leader():
- self.dashboard_target.publish_info(
- name="osm-kafka",
- dashboard=Path("templates/kafka_exporter_dashboard.json").read_text(),
- )
-
- def _is_kafka_endpoint_set(self, config: ConfigModel) -> bool:
- """Check if Kafka endpoint is set."""
- return config.kafka_endpoint or self._is_kafka_relation_set()
-
- def _is_kafka_relation_set(self) -> bool:
- """Check if the Kafka relation is set or not."""
- return self.kafka.host and self.kafka.port
-
- @property
- def kafka_endpoint(self) -> KafkaEndpoint:
- config = ConfigModel(**dict(self.config))
- if config.kafka_endpoint:
- host, port = config.kafka_endpoint.split(":")
- else:
- host = self.kafka.host
- port = self.kafka.port
- return KafkaEndpoint(host, port)
-
- def build_pod_spec(self, image_info):
- """Build the PodSpec to be used.
-
- Args:
- image_info (str): container image information.
-
- Returns:
- Dict: PodSpec information.
- """
- # Validate config
- config = ConfigModel(**dict(self.config))
-
- # Check relations
- if not self._is_kafka_endpoint_set(config):
- raise RelationsMissing(["kafka"])
-
- # Create Builder for the PodSpec
- pod_spec_builder = PodSpecV3Builder(
- enable_security_context=config.security_context
- )
-
- # Build container
- container_builder = ContainerV3Builder(
- self.app.name,
- image_info,
- config.image_pull_policy,
- run_as_non_root=config.security_context,
- )
- container_builder.add_port(name="exporter", port=PORT)
- container_builder.add_http_readiness_probe(
- path="/api/health",
- port=PORT,
- initial_delay_seconds=10,
- period_seconds=10,
- timeout_seconds=5,
- success_threshold=1,
- failure_threshold=3,
- )
- container_builder.add_http_liveness_probe(
- path="/api/health",
- port=PORT,
- initial_delay_seconds=60,
- timeout_seconds=30,
- failure_threshold=10,
- )
- container_builder.add_command(
- [
- "kafka_exporter",
- f"--kafka.server={self.kafka_endpoint.host}:{self.kafka_endpoint.port}",
- ]
- )
- container = container_builder.build()
-
- # Add container to PodSpec
- pod_spec_builder.add_container(container)
-
- # Add ingress resources to PodSpec if site url exists
- if config.site_url:
- parsed = urlparse(config.site_url)
- annotations = {}
- if config.ingress_class:
- annotations["kubernetes.io/ingress.class"] = config.ingress_class
- ingress_resource_builder = IngressResourceV3Builder(
- f"{self.app.name}-ingress", annotations
- )
-
- if config.ingress_whitelist_source_range:
- annotations[
- "nginx.ingress.kubernetes.io/whitelist-source-range"
- ] = config.ingress_whitelist_source_range
-
- if config.cluster_issuer:
- annotations["cert-manager.io/cluster-issuer"] = config.cluster_issuer
-
- if parsed.scheme == "https":
- ingress_resource_builder.add_tls(
- [parsed.hostname], config.tls_secret_name
- )
- else:
- annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
-
- ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT)
- ingress_resource = ingress_resource_builder.build()
- pod_spec_builder.add_ingress_resource(ingress_resource)
-
- return pod_spec_builder.build()
-
-
-if __name__ == "__main__":
- main(KafkaExporterCharm)
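
As a quick illustration of the fallback implemented in the `kafka_endpoint` property above, a sketch following the unit-test pattern used further below (the endpoint value is an assumption, and the config is set before `begin()` so no hooks fire):

```python
# Sketch only: the kafka_endpoint config option substitutes for the kafka relation.
from charm import KafkaExporterCharm
from ops.testing import Harness

harness = Harness(KafkaExporterCharm)
harness.set_leader(True)
harness.update_config({"kafka_endpoint": "kafka:9092"})  # before begin(): no hooks fire
harness.begin()

endpoint = harness.charm.kafka_endpoint
assert (endpoint.host, endpoint.port) == ("kafka", "9092")
```
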
diff --git a/installers/charm/kafka-exporter/src/pod_spec.py b/installers/charm/kafka-exporter/src/pod_spec.py
deleted file mode 100644
index 214d6529..00000000
--- a/installers/charm/kafka-exporter/src/pod_spec.py
+++ /dev/null
@@ -1,314 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-from ipaddress import ip_network
-import logging
-from typing import Any, Dict, List
-from urllib.parse import urlparse
-
-logger = logging.getLogger(__name__)
-
-
-def _validate_ip_network(network: str) -> bool:
- """Validate IP network.
-
- Args:
- network (str): IP network range.
-
- Returns:
- bool: True if valid, false otherwise.
- """
- if not network:
- return True
-
- try:
- ip_network(network)
- except ValueError:
- return False
-
- return True
-
-
-def _validate_data(config_data: Dict[str, Any], relation_data: Dict[str, Any]) -> bool:
- """Validates passed information.
-
- Args:
- config_data (Dict[str, Any]): configuration information.
- relation_data (Dict[str, Any]): relation information
-
- Raises:
- ValueError: when config and/or relation data is not valid.
- """
- config_validators = {
- "site_url": lambda value, _: isinstance(value, str)
- if value is not None
- else True,
- "cluster_issuer": lambda value, _: isinstance(value, str)
- if value is not None
- else True,
- "ingress_whitelist_source_range": lambda value, _: _validate_ip_network(value),
- "tls_secret_name": lambda value, _: isinstance(value, str)
- if value is not None
- else True,
- }
- relation_validators = {
- "kafka_host": lambda value, _: isinstance(value, str) and len(value) > 0,
- "kafka_port": lambda value, _: isinstance(value, str)
- and len(value) > 0
- and int(value) > 0,
- }
- problems = []
-
- for key, validator in config_validators.items():
- valid = validator(config_data.get(key), config_data)
-
- if not valid:
- problems.append(key)
-
- for key, validator in relation_validators.items():
- valid = validator(relation_data.get(key), relation_data)
-
- if not valid:
- problems.append(key)
-
- if len(problems) > 0:
- raise ValueError("Errors found in: {}".format(", ".join(problems)))
-
- return True
-
-
-def _make_pod_ports(port: int) -> List[Dict[str, Any]]:
- """Generate pod ports details.
-
- Args:
- port (int): port to expose.
-
- Returns:
- List[Dict[str, Any]]: pod port details.
- """
- return [{"name": "kafka-exporter", "containerPort": port, "protocol": "TCP"}]
-
-
-def _make_pod_envconfig(
- config: Dict[str, Any], relation_state: Dict[str, Any]
-) -> Dict[str, Any]:
- """Generate pod environment configuration.
-
- Args:
- config (Dict[str, Any]): configuration information.
- relation_state (Dict[str, Any]): relation state information.
-
- Returns:
- Dict[str, Any]: pod environment configuration.
- """
- envconfig = {}
-
- return envconfig
-
-
-def _make_pod_ingress_resources(
- config: Dict[str, Any], app_name: str, port: int
-) -> List[Dict[str, Any]]:
- """Generate pod ingress resources.
-
- Args:
- config (Dict[str, Any]): configuration information.
- app_name (str): application name.
- port (int): port to expose.
-
- Returns:
- List[Dict[str, Any]]: pod ingress resources.
- """
- site_url = config.get("site_url")
-
- if not site_url:
- return
-
- parsed = urlparse(site_url)
-
- if not parsed.scheme.startswith("http"):
- return
-
- ingress_whitelist_source_range = config["ingress_whitelist_source_range"]
- cluster_issuer = config["cluster_issuer"]
-
- annotations = {}
-
- if ingress_whitelist_source_range:
- annotations[
- "nginx.ingress.kubernetes.io/whitelist-source-range"
- ] = ingress_whitelist_source_range
-
- if cluster_issuer:
- annotations["cert-manager.io/cluster-issuer"] = cluster_issuer
-
- ingress_spec_tls = None
-
- if parsed.scheme == "https":
- ingress_spec_tls = [{"hosts": [parsed.hostname]}]
- tls_secret_name = config["tls_secret_name"]
- if tls_secret_name:
- ingress_spec_tls[0]["secretName"] = tls_secret_name
- else:
- annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
-
- ingress = {
- "name": "{}-ingress".format(app_name),
- "annotations": annotations,
- "spec": {
- "rules": [
- {
- "host": parsed.hostname,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ]
- },
- }
- if ingress_spec_tls:
- ingress["spec"]["tls"] = ingress_spec_tls
-
- return [ingress]
-
-
-def _make_readiness_probe(port: int) -> Dict[str, Any]:
- """Generate readiness probe.
-
- Args:
- port (int): service port.
-
- Returns:
- Dict[str, Any]: readiness probe.
- """
- return {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "periodSeconds": 10,
- "timeoutSeconds": 5,
- "successThreshold": 1,
- "failureThreshold": 3,
- }
-
-
-def _make_liveness_probe(port: int) -> Dict[str, Any]:
- """Generate liveness probe.
-
- Args:
- port (int): service port.
-
- Returns:
- Dict[str, Any]: liveness probe.
- """
- return {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 60,
- "timeoutSeconds": 30,
- "failureThreshold": 10,
- }
-
-
-def _make_pod_command(relation: Dict[str, Any]) -> List[str]:
- """Generate the startup command.
-
- Args:
- relation (Dict[str, Any]): Relation information.
-
- Returns:
- List[str]: command to startup the process.
- """
- command = [
- "kafka_exporter",
- "--kafka.server={}:{}".format(
- relation.get("kafka_host"), relation.get("kafka_port")
- ),
- ]
-
- return command
-
-
-def make_pod_spec(
- image_info: Dict[str, str],
- config: Dict[str, Any],
- relation_state: Dict[str, Any],
- app_name: str = "kafka-exporter",
- port: int = 9308,
-) -> Dict[str, Any]:
- """Generate the pod spec information.
-
- Args:
- image_info (Dict[str, str]): Object provided by
- OCIImageResource("image").fetch().
- config (Dict[str, Any]): Configuration information.
- relation_state (Dict[str, Any]): Relation state information.
- app_name (str, optional): Application name. Defaults to "kafka-exporter".
- port (int, optional): Port for the container. Defaults to 9308.
-
- Returns:
- Dict[str, Any]: Pod spec dictionary for the charm.
- """
- if not image_info:
- return None
-
- _validate_data(config, relation_state)
-
- ports = _make_pod_ports(port)
- env_config = _make_pod_envconfig(config, relation_state)
- readiness_probe = _make_readiness_probe(port)
- liveness_probe = _make_liveness_probe(port)
- ingress_resources = _make_pod_ingress_resources(config, app_name, port)
- command = _make_pod_command(relation_state)
-
- return {
- "version": 3,
- "containers": [
- {
- "name": app_name,
- "imageDetails": image_info,
- "imagePullPolicy": "Always",
- "ports": ports,
- "envConfig": env_config,
- "command": command,
- "kubernetes": {
- "readinessProbe": readiness_probe,
- "livenessProbe": liveness_probe,
- },
- }
- ],
- "kubernetesResources": {
- "ingressResources": ingress_resources or [],
- },
- }
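
A minimal sketch of how `make_pod_spec` would be driven; the `image_info` shape and the config and relation values are assumptions, chosen only to exercise the validation and command-building paths:

```python
# Hypothetical invocation of make_pod_spec; image_info shape and values are assumptions.
import pod_spec

image_info = {"imagePath": "bitnami/kafka-exporter:1.4.2"}  # shape assumed
config = {
    "site_url": "https://kafka-exporter.example.com",
    "cluster_issuer": "",
    "ingress_whitelist_source_range": "",
    "tls_secret_name": "",
}
relation_state = {"kafka_host": "kafka", "kafka_port": "9092"}

spec = pod_spec.make_pod_spec(image_info, config, relation_state)
assert spec["containers"][0]["command"] == [
    "kafka_exporter",
    "--kafka.server=kafka:9092",
]
```
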
diff --git a/installers/charm/kafka-exporter/templates/kafka_exporter_dashboard.json b/installers/charm/kafka-exporter/templates/kafka_exporter_dashboard.json
deleted file mode 100644
index 5b7552ad..00000000
--- a/installers/charm/kafka-exporter/templates/kafka_exporter_dashboard.json
+++ /dev/null
@@ -1,609 +0,0 @@
-{
- "annotations": {
- "list": [
- {
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "description": "Kafka resource usage and throughput",
- "editable": true,
- "gnetId": 7589,
- "graphTooltip": 0,
- "id": 10,
- "iteration": 1578848023483,
- "links": [],
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 10,
- "w": 10,
- "x": 0,
- "y": 0
- },
- "id": 14,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": true,
- "min": false,
- "rightSide": false,
- "show": true,
- "sideWidth": 480,
- "sort": "max",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "connected",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(kafka_topic_partition_current_offset - kafka_topic_partition_oldest_offset{instance=\"$instance\", topic=~\"$topic\"}) by (topic)",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{topic}}",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Messages stored per topic",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 10,
- "w": 10,
- "x": 10,
- "y": 0
- },
- "id": 12,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": true,
- "min": false,
- "rightSide": false,
- "show": true,
- "sideWidth": 480,
- "sort": "max",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "connected",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(kafka_consumergroup_lag{instance=\"$instance\",topic=~\"$topic\"}) by (consumergroup, topic) ",
- "format": "time_series",
- "instant": false,
- "interval": "",
- "intervalFactor": 1,
- "legendFormat": " {{topic}} ({{consumergroup}})",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Lag by Consumer Group",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": "",
- "logBase": 1,
- "max": null,
- "min": "0",
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 10,
- "w": 10,
- "x": 0,
- "y": 10
- },
- "id": 16,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": true,
- "min": false,
- "rightSide": false,
- "show": true,
- "sideWidth": 480,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "connected",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(delta(kafka_topic_partition_current_offset{instance=~'$instance', topic=~\"$topic\"}[5m])/5) by (topic)",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{topic}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Messages produced per minute",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fill": 0,
- "fillGradient": 0,
- "gridPos": {
- "h": 10,
- "w": 10,
- "x": 10,
- "y": 10
- },
- "id": 18,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": true,
- "min": false,
- "rightSide": false,
- "show": true,
- "sideWidth": 480,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "connected",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(delta(kafka_consumergroup_current_offset{instance=~'$instance',topic=~\"$topic\"}[5m])/5) by (consumergroup, topic)",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": " {{topic}} ({{consumergroup}})",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Messages consumed per minute",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": true,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 20,
- "x": 0,
- "y": 20
- },
- "id": 8,
- "legend": {
- "alignAsTable": true,
- "avg": false,
- "current": true,
- "max": false,
- "min": false,
- "rightSide": true,
- "show": true,
- "sideWidth": 420,
- "total": false,
- "values": true
- },
- "lines": false,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum by(topic) (kafka_topic_partitions{instance=\"$instance\",topic=~\"$topic\"})",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{topic}}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Partitions per Topic",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "series",
- "name": null,
- "show": false,
- "values": [
- "current"
- ]
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "refresh": "5s",
- "schemaVersion": 19,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": [
- {
- "allValue": null,
- "current": {
- "text": "osm-kafka-exporter-service",
- "value": "osm-kafka-exporter-service"
- },
- "datasource": "prometheus - Juju generated source",
- "definition": "",
- "hide": 0,
- "includeAll": false,
- "label": "Job",
- "multi": false,
- "name": "job",
- "options": [],
- "query": "label_values(kafka_consumergroup_current_offset, job)",
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "sort": 0,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
- },
- {
- "allValue": null,
- "datasource": "prometheus - Juju generated source",
- "definition": "",
- "hide": 0,
- "includeAll": false,
- "label": "Instance",
- "multi": false,
- "name": "instance",
- "options": [],
- "query": "label_values(kafka_consumergroup_current_offset{job=~\"$job\"}, instance)",
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "sort": 0,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
- },
- {
- "allValue": null,
- "current": {
- "tags": [],
- "text": "All",
- "value": [
- "$__all"
- ]
- },
- "datasource": "prometheus - Juju generated source",
- "definition": "",
- "hide": 0,
- "includeAll": true,
- "label": "Topic",
- "multi": true,
- "name": "topic",
- "options": [],
- "query": "label_values(kafka_topic_partition_current_offset{instance='$instance',topic!='__consumer_offsets',topic!='--kafka'}, topic)",
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "sort": 1,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "topic",
- "type": "query",
- "useTags": false
- }
- ]
- },
- "time": {
- "from": "now-1h",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "browser",
- "title": "Kafka",
- "uid": "jwPKIsniz",
- "version": 2
-}
diff --git a/installers/charm/kafka-exporter/tests/__init__.py b/installers/charm/kafka-exporter/tests/__init__.py
deleted file mode 100644
index 90dc417c..00000000
--- a/installers/charm/kafka-exporter/tests/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-"""Init mocking for unit tests."""
-
-import sys
-
-import mock
-
-
-class OCIImageResourceErrorMock(Exception):
- pass
-
-
-sys.path.append("src")
-
-oci_image = mock.MagicMock()
-oci_image.OCIImageResourceError = OCIImageResourceErrorMock
-sys.modules["oci_image"] = oci_image
-sys.modules["oci_image"].OCIImageResource().fetch.return_value = {}
diff --git a/installers/charm/kafka-exporter/tests/test_charm.py b/installers/charm/kafka-exporter/tests/test_charm.py
deleted file mode 100644
index c00943b8..00000000
--- a/installers/charm/kafka-exporter/tests/test_charm.py
+++ /dev/null
@@ -1,554 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-import sys
-from typing import NoReturn
-import unittest
-
-
-from charm import KafkaExporterCharm
-from ops.model import ActiveStatus, BlockedStatus
-from ops.testing import Harness
-
-
-class TestCharm(unittest.TestCase):
- """Kafka Exporter Charm unit tests."""
-
- def setUp(self) -> NoReturn:
- """Test setup"""
- self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
- self.harness = Harness(KafkaExporterCharm)
- self.harness.set_leader(is_leader=True)
- self.harness.begin()
- self.config = {
- "ingress_whitelist_source_range": "",
- "tls_secret_name": "",
- "site_url": "https://kafka-exporter.192.168.100.100.nip.io",
- "cluster_issuer": "vault-issuer",
- }
- self.harness.update_config(self.config)
-
- def test_config_changed_no_relations(
- self,
- ) -> NoReturn:
- """Test config changed without any relation."""
-
- self.harness.charm.on.config_changed.emit()
-
- # Assertions
- self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
- print(self.harness.charm.unit.status.message)
- self.assertTrue(
- all(
- relation in self.harness.charm.unit.status.message
- for relation in ["kafka"]
- )
- )
-
- def test_config_changed_non_leader(
- self,
- ) -> NoReturn:
- """Test config changed when the unit is not the leader."""
- self.harness.set_leader(is_leader=False)
- self.harness.charm.on.config_changed.emit()
-
- # Assertions
- self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
-
- def test_with_relations(
- self,
- ) -> NoReturn:
- """Test with relations."""
- self.initialize_kafka_relation()
-
- # Verifying status
- self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
- def initialize_kafka_relation(self):
- kafka_relation_id = self.harness.add_relation("kafka", "kafka")
- self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
- self.harness.update_relation_data(
- kafka_relation_id, "kafka", {"host": "kafka", "port": 9092}
- )
-
-
-if __name__ == "__main__":
- unittest.main()
-
-
-# class TestCharm(unittest.TestCase):
-# """Kafka Exporter Charm unit tests."""
-#
-# def setUp(self) -> NoReturn:
-# """Test setup"""
-# self.harness = Harness(KafkaExporterCharm)
-# self.harness.set_leader(is_leader=True)
-# self.harness.begin()
-#
-# def test_on_start_without_relations(self) -> NoReturn:
-# """Test installation without any relation."""
-# self.harness.charm.on.start.emit()
-#
-# # Verifying status
-# self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-#
-# # Verifying status message
-# self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-# self.assertTrue(
-# self.harness.charm.unit.status.message.startswith("Waiting for ")
-# )
-# self.assertIn("kafka", self.harness.charm.unit.status.message)
-# self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
-#
-# def test_on_start_with_relations_without_http(self) -> NoReturn:
-# """Test deployment."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "kafka-exporter",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "kafka-exporter",
-# "containerPort": 9308,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {},
-# "command": ["kafka_exporter", "--kafka.server=kafka:9090"],
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9308,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9308,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {"ingressResources": []},
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# # Initializing the kafka relation
-# relation_id = self.harness.add_relation("kafka", "kafka")
-# self.harness.add_relation_unit(relation_id, "kafka/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "kafka/0",
-# {
-# "host": "kafka",
-# "port": "9090",
-# },
-# )
-#
-# # Verifying status
-# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-#
-# pod_spec, _ = self.harness.get_pod_spec()
-#
-# self.assertDictEqual(expected_result, pod_spec)
-#
-# def test_ingress_resources_with_http(self) -> NoReturn:
-# """Test ingress resources with HTTP."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "kafka-exporter",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "kafka-exporter",
-# "containerPort": 9308,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {},
-# "command": ["kafka_exporter", "--kafka.server=kafka:9090"],
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9308,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9308,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {
-# "ingressResources": [
-# {
-# "name": "kafka-exporter-ingress",
-# "annotations": {
-# "nginx.ingress.kubernetes.io/ssl-redirect": "false",
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": "kafka-exporter",
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": "kafka-exporter",
-# "servicePort": 9308,
-# },
-# }
-# ]
-# },
-# }
-# ]
-# },
-# }
-# ],
-# },
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# # Initializing the kafka relation
-# relation_id = self.harness.add_relation("kafka", "kafka")
-# self.harness.add_relation_unit(relation_id, "kafka/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "kafka/0",
-# {
-# "host": "kafka",
-# "port": "9090",
-# },
-# )
-#
-# self.harness.update_config({"site_url": "http://kafka-exporter"})
-#
-# pod_spec, _ = self.harness.get_pod_spec()
-#
-# self.assertDictEqual(expected_result, pod_spec)
-#
-# def test_ingress_resources_with_https(self) -> NoReturn:
-# """Test ingress resources with HTTPS."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "kafka-exporter",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "kafka-exporter",
-# "containerPort": 9308,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {},
-# "command": ["kafka_exporter", "--kafka.server=kafka:9090"],
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9308,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9308,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {
-# "ingressResources": [
-# {
-# "name": "kafka-exporter-ingress",
-# "annotations": {},
-# "spec": {
-# "rules": [
-# {
-# "host": "kafka-exporter",
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": "kafka-exporter",
-# "servicePort": 9308,
-# },
-# }
-# ]
-# },
-# }
-# ],
-# "tls": [
-# {
-# "hosts": ["kafka-exporter"],
-# "secretName": "kafka-exporter",
-# }
-# ],
-# },
-# }
-# ],
-# },
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# # Initializing the kafka relation
-# relation_id = self.harness.add_relation("kafka", "kafka")
-# self.harness.add_relation_unit(relation_id, "kafka/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "kafka/0",
-# {
-# "host": "kafka",
-# "port": "9090",
-# },
-# )
-#
-# self.harness.update_config(
-# {
-# "site_url": "https://kafka-exporter",
-# "tls_secret_name": "kafka-exporter",
-# }
-# )
-#
-# pod_spec, _ = self.harness.get_pod_spec()
-#
-# self.assertDictEqual(expected_result, pod_spec)
-#
-# def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn:
-# """Test ingress resources with HTTPS and ingress whitelist."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "kafka-exporter",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "kafka-exporter",
-# "containerPort": 9308,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {},
-# "command": ["kafka_exporter", "--kafka.server=kafka:9090"],
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9308,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9308,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {
-# "ingressResources": [
-# {
-# "name": "kafka-exporter-ingress",
-# "annotations": {
-# "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0",
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": "kafka-exporter",
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": "kafka-exporter",
-# "servicePort": 9308,
-# },
-# }
-# ]
-# },
-# }
-# ],
-# "tls": [
-# {
-# "hosts": ["kafka-exporter"],
-# "secretName": "kafka-exporter",
-# }
-# ],
-# },
-# }
-# ],
-# },
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# # Initializing the kafka relation
-# relation_id = self.harness.add_relation("kafka", "kafka")
-# self.harness.add_relation_unit(relation_id, "kafka/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "kafka/0",
-# {
-# "host": "kafka",
-# "port": "9090",
-# },
-# )
-#
-# self.harness.update_config(
-# {
-# "site_url": "https://kafka-exporter",
-# "tls_secret_name": "kafka-exporter",
-# "ingress_whitelist_source_range": "0.0.0.0/0",
-# }
-# )
-#
-# pod_spec, _ = self.harness.get_pod_spec()
-#
-# self.assertDictEqual(expected_result, pod_spec)
-#
-# def test_on_kafka_unit_relation_changed(self) -> NoReturn:
-# """Test to see if kafka relation is updated."""
-# self.harness.charm.on.start.emit()
-#
-# relation_id = self.harness.add_relation("kafka", "kafka")
-# self.harness.add_relation_unit(relation_id, "kafka/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "kafka/0",
-# {
-# "host": "kafka",
-# "port": "9090",
-# },
-# )
-#
-# # Verifying status
-# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-#
-# def test_publish_target_info(self) -> NoReturn:
-# """Test to see if target relation is updated."""
-# expected_result = {
-# "hostname": "kafka-exporter",
-# "port": "9308",
-# "metrics_path": "/metrics",
-# "scrape_interval": "30s",
-# "scrape_timeout": "15s",
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# relation_id = self.harness.add_relation("prometheus-scrape", "prometheus")
-# self.harness.add_relation_unit(relation_id, "prometheus/0")
-# relation_data = self.harness.get_relation_data(relation_id, "kafka-exporter/0")
-#
-# self.assertDictEqual(expected_result, relation_data)
-#
-# def test_publish_target_info_with_site_url(self) -> NoReturn:
-# """Test to see if target relation is updated."""
-# expected_result = {
-# "hostname": "kafka-exporter-osm",
-# "port": "80",
-# "metrics_path": "/metrics",
-# "scrape_interval": "30s",
-# "scrape_timeout": "15s",
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# self.harness.update_config({"site_url": "http://kafka-exporter-osm"})
-#
-# relation_id = self.harness.add_relation("prometheus-scrape", "prometheus")
-# self.harness.add_relation_unit(relation_id, "prometheus/0")
-# relation_data = self.harness.get_relation_data(relation_id, "kafka-exporter/0")
-#
-# self.assertDictEqual(expected_result, relation_data)
-#
-# def test_publish_dashboard_info(self) -> NoReturn:
-# """Test to see if dashboard relation is updated."""
-# self.harness.charm.on.start.emit()
-#
-# relation_id = self.harness.add_relation("grafana-dashboard", "grafana")
-# self.harness.add_relation_unit(relation_id, "grafana/0")
-# relation_data = self.harness.get_relation_data(relation_id, "kafka-exporter/0")
-#
-# self.assertTrue("dashboard" in relation_data)
-# self.assertTrue(len(relation_data["dashboard"]) > 0)
-#
-#
-# if __name__ == "__main__":
-# unittest.main()
diff --git a/installers/charm/kafka-exporter/tests/test_pod_spec.py b/installers/charm/kafka-exporter/tests/test_pod_spec.py
deleted file mode 100644
index ad0e412f..00000000
--- a/installers/charm/kafka-exporter/tests/test_pod_spec.py
+++ /dev/null
@@ -1,509 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-from typing import NoReturn
-import unittest
-
-import pod_spec
-
-
-class TestPodSpec(unittest.TestCase):
- """Pod spec unit tests."""
-
- def test_make_pod_ports(self) -> NoReturn:
- """Testing make pod ports."""
- port = 9308
-
- expected_result = [
- {
- "name": "kafka-exporter",
- "containerPort": port,
- "protocol": "TCP",
- }
- ]
-
- pod_ports = pod_spec._make_pod_ports(port)
-
- self.assertListEqual(expected_result, pod_ports)
-
- def test_make_pod_envconfig(self) -> NoReturn:
-        """Testing make pod envconfig."""
- config = {}
- relation_state = {}
-
- expected_result = {}
-
- pod_envconfig = pod_spec._make_pod_envconfig(config, relation_state)
-
- self.assertDictEqual(expected_result, pod_envconfig)
-
- def test_make_pod_ingress_resources_without_site_url(self) -> NoReturn:
- """Testing make pod ingress resources without site_url."""
- config = {
- "cluster_issuer": "",
- "site_url": "",
- }
- app_name = "kafka-exporter"
- port = 9308
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertIsNone(pod_ingress_resources)
-
- def test_make_pod_ingress_resources(self) -> NoReturn:
- """Testing make pod ingress resources."""
- config = {
- "cluster_issuer": "",
- "site_url": "http://kafka-exporter",
- "ingress_whitelist_source_range": "",
- }
- app_name = "kafka-exporter"
- port = 9308
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {
- "nginx.ingress.kubernetes.io/ssl-redirect": "false",
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ]
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_pod_ingress_resources_with_whitelist_source_range(self) -> NoReturn:
- """Testing make pod ingress resources with whitelist_source_range."""
- config = {
- "site_url": "http://kafka-exporter",
- "cluster_issuer": "",
- "ingress_whitelist_source_range": "0.0.0.0/0",
- }
- app_name = "kafka-exporter"
- port = 9308
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {
- "nginx.ingress.kubernetes.io/ssl-redirect": "false",
- "nginx.ingress.kubernetes.io/whitelist-source-range": config[
- "ingress_whitelist_source_range"
- ],
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ]
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_pod_ingress_resources_with_https(self) -> NoReturn:
- """Testing make pod ingress resources with HTTPs."""
- config = {
- "site_url": "https://kafka-exporter",
- "max_file_size": 0,
- "cluster_issuer": "",
- "ingress_whitelist_source_range": "",
- "tls_secret_name": "",
- }
- app_name = "kafka-exporter"
- port = 9308
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {},
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ],
- "tls": [{"hosts": [app_name]}],
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_pod_ingress_resources_with_https_tls_secret_name(self) -> NoReturn:
- """Testing make pod ingress resources with HTTPs and TLS secret name."""
- config = {
- "site_url": "https://kafka-exporter",
- "max_file_size": 0,
- "cluster_issuer": "",
- "ingress_whitelist_source_range": "",
- "tls_secret_name": "secret_name",
- }
- app_name = "kafka-exporter"
- port = 9308
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {},
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ],
- "tls": [
- {"hosts": [app_name], "secretName": config["tls_secret_name"]}
- ],
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_readiness_probe(self) -> NoReturn:
- """Testing make readiness probe."""
- port = 9308
-
- expected_result = {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "periodSeconds": 10,
- "timeoutSeconds": 5,
- "successThreshold": 1,
- "failureThreshold": 3,
- }
-
- readiness_probe = pod_spec._make_readiness_probe(port)
-
- self.assertDictEqual(expected_result, readiness_probe)
-
- def test_make_liveness_probe(self) -> NoReturn:
- """Testing make liveness probe."""
- port = 9308
-
- expected_result = {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 60,
- "timeoutSeconds": 30,
- "failureThreshold": 10,
- }
-
- liveness_probe = pod_spec._make_liveness_probe(port)
-
- self.assertDictEqual(expected_result, liveness_probe)
-
- def test_make_pod_command(self) -> NoReturn:
- """Testing make pod command."""
- relation = {
-            "kafka_host": "kafka",
- "kafka_port": "9090",
- }
-
- expected_result = [
- "kafka_exporter",
- "--kafka.server={}:{}".format(
- relation.get("kafka_host"), relation.get("kafka_port")
- ),
- ]
-
- pod_envconfig = pod_spec._make_pod_command(relation)
-
- self.assertListEqual(expected_result, pod_envconfig)
-
- def test_make_pod_spec(self) -> NoReturn:
- """Testing make pod spec."""
- image_info = {"upstream-source": "bitnami/kafka-exporter:latest"}
- config = {
- "site_url": "",
- "cluster_issuer": "",
- }
- relation_state = {
- "kafka_host": "kafka",
- "kafka_port": "9090",
- }
- app_name = "kafka-exporter"
- port = 9308
-
- expected_result = {
- "version": 3,
- "containers": [
- {
- "name": app_name,
- "imageDetails": image_info,
- "imagePullPolicy": "Always",
- "ports": [
- {
- "name": app_name,
- "containerPort": port,
- "protocol": "TCP",
- }
- ],
- "envConfig": {},
- "command": ["kafka_exporter", "--kafka.server=kafka:9090"],
- "kubernetes": {
- "readinessProbe": {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "periodSeconds": 10,
- "timeoutSeconds": 5,
- "successThreshold": 1,
- "failureThreshold": 3,
- },
- "livenessProbe": {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 60,
- "timeoutSeconds": 30,
- "failureThreshold": 10,
- },
- },
- }
- ],
- "kubernetesResources": {"ingressResources": []},
- }
-
- spec = pod_spec.make_pod_spec(
- image_info, config, relation_state, app_name, port
- )
-
- self.assertDictEqual(expected_result, spec)
-
- def test_make_pod_spec_with_ingress(self) -> NoReturn:
-        """Testing make pod spec with ingress."""
- image_info = {"upstream-source": "bitnami/kafka-exporter:latest"}
- config = {
- "site_url": "https://kafka-exporter",
- "cluster_issuer": "",
- "tls_secret_name": "kafka-exporter",
- "max_file_size": 0,
- "ingress_whitelist_source_range": "0.0.0.0/0",
- }
- relation_state = {
- "kafka_host": "kafka",
- "kafka_port": "9090",
- }
- app_name = "kafka-exporter"
- port = 9308
-
- expected_result = {
- "version": 3,
- "containers": [
- {
- "name": app_name,
- "imageDetails": image_info,
- "imagePullPolicy": "Always",
- "ports": [
- {
- "name": app_name,
- "containerPort": port,
- "protocol": "TCP",
- }
- ],
- "envConfig": {},
- "command": ["kafka_exporter", "--kafka.server=kafka:9090"],
- "kubernetes": {
- "readinessProbe": {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "periodSeconds": 10,
- "timeoutSeconds": 5,
- "successThreshold": 1,
- "failureThreshold": 3,
- },
- "livenessProbe": {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 60,
- "timeoutSeconds": 30,
- "failureThreshold": 10,
- },
- },
- }
- ],
- "kubernetesResources": {
- "ingressResources": [
- {
- "name": "{}-ingress".format(app_name),
- "annotations": {
- "nginx.ingress.kubernetes.io/whitelist-source-range": config.get(
- "ingress_whitelist_source_range"
- ),
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ],
- "tls": [
- {
- "hosts": [app_name],
- "secretName": config.get("tls_secret_name"),
- }
- ],
- },
- }
- ],
- },
- }
-
- spec = pod_spec.make_pod_spec(
- image_info, config, relation_state, app_name, port
- )
-
- self.assertDictEqual(expected_result, spec)
-
- def test_make_pod_spec_without_image_info(self) -> NoReturn:
- """Testing make pod spec without image_info."""
- image_info = None
- config = {
- "site_url": "",
- "cluster_issuer": "",
- }
- relation_state = {
- "kafka_host": "kafka",
- "kafka_port": "9090",
- }
- app_name = "kafka-exporter"
- port = 9308
-
- spec = pod_spec.make_pod_spec(
- image_info, config, relation_state, app_name, port
- )
-
- self.assertIsNone(spec)
-
- def test_make_pod_spec_without_relation_state(self) -> NoReturn:
- """Testing make pod spec without relation_state."""
- image_info = {"upstream-source": "bitnami/kafka-exporter:latest"}
- config = {
- "site_url": "",
- "cluster_issuer": "",
- }
- relation_state = {}
- app_name = "kafka-exporter"
- port = 9308
-
- with self.assertRaises(ValueError):
- pod_spec.make_pod_spec(image_info, config, relation_state, app_name, port)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/installers/charm/kafka-exporter/tox.ini b/installers/charm/kafka-exporter/tox.ini
deleted file mode 100644
index f3c91440..00000000
--- a/installers/charm/kafka-exporter/tox.ini
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-#######################################################################################
-
-[tox]
-envlist = black, cover, flake8, pylint, yamllint, safety
-skipsdist = true
-
-[tox:jenkins]
-toxworkdir = /tmp/.tox
-
-[testenv]
-basepython = python3.8
-setenv =
- VIRTUAL_ENV={envdir}
- PYTHONPATH = {toxinidir}:{toxinidir}/lib:{toxinidir}/src
- PYTHONDONTWRITEBYTECODE = 1
-deps = -r{toxinidir}/requirements.txt
-
-
-#######################################################################################
-[testenv:black]
-deps = black
-commands =
- black --check --diff src/ tests/
-
-
-#######################################################################################
-[testenv:cover]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- coverage
- nose2
-commands =
- sh -c 'rm -f nosetests.xml'
- coverage erase
- nose2 -C --coverage src
- coverage report --omit='*tests*'
- coverage html -d ./cover --omit='*tests*'
- coverage xml -o coverage.xml --omit=*tests*
-whitelist_externals = sh
-
-
-#######################################################################################
-[testenv:flake8]
-deps = flake8
- flake8-import-order
-commands =
- flake8 src/ tests/
-
-
-#######################################################################################
-[testenv:pylint]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- pylint==2.10.2
-commands =
- pylint -E src/ tests/
-
-
-#######################################################################################
-[testenv:safety]
-setenv =
- LC_ALL=C.UTF-8
- LANG=C.UTF-8
-deps = {[testenv]deps}
- safety
-commands =
- - safety check --full-report
-
-
-#######################################################################################
-[testenv:yamllint]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- yamllint
-commands = yamllint .
-
-#######################################################################################
-[testenv:build]
-passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
-whitelist_externals =
- charmcraft
- sh
-commands =
- charmcraft pack
- sh -c 'ubuntu_version=20.04; \
- architectures="amd64-aarch64-arm64"; \
- charm_name=`cat metadata.yaml | grep -E "^name: " | cut -f 2 -d " "`; \
- mv $charm_name"_ubuntu-"$ubuntu_version-$architectures.charm $charm_name.charm'
-
-#######################################################################################
-[flake8]
-ignore =
- W291,
- W293,
- W503,
- E123,
- E125,
- E226,
- E241,
-exclude =
- .git,
- __pycache__,
- .tox,
-max-line-length = 120
-show-source = True
-builtins = _
-max-complexity = 10
-import-order-style = google
diff --git a/installers/charm/local_osm_bundle.yaml b/installers/charm/local_osm_bundle.yaml
deleted file mode 100644
index 6ab0df6b..00000000
--- a/installers/charm/local_osm_bundle.yaml
+++ /dev/null
@@ -1,215 +0,0 @@
-# Copyright 2020 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-name: osm
-bundle: kubernetes
-description: Local bundle for development
-applications:
- zookeeper:
- charm: zookeeper-k8s
- channel: latest/edge
- scale: 1
- storage:
- data: 100M
- annotations:
- gui-x: 0
- gui-y: 500
- mariadb:
- charm: charmed-osm-mariadb-k8s
- scale: 1
- series: kubernetes
- storage:
- database: 50M
- options:
- password: manopw
- root_password: osm4u
- user: mano
- annotations:
- gui-x: -300
- gui-y: -250
- kafka:
- charm: kafka-k8s
- channel: latest/edge
- scale: 1
- trust: true
- storage:
- data: 100M
- annotations:
- gui-x: 0
- gui-y: 250
- mongodb:
- charm: mongodb-k8s
- channel: latest/stable
- scale: 1
- series: kubernetes
- storage:
- db: 50M
- annotations:
- gui-x: 0
- gui-y: 0
- nbi:
- charm: ./nbi/osm-nbi.charm
- scale: 1
- resources:
- image: opensourcemano/nbi:testing-daily
- series: kubernetes
- options:
- database_commonkey: osm
- auth_backend: keystone
- log_level: DEBUG
- annotations:
- gui-x: 0
- gui-y: -250
- ro:
- charm: ./ro/osm-ro.charm
- scale: 1
- resources:
- image: opensourcemano/ro:testing-daily
- series: kubernetes
- options:
- log_level: DEBUG
- annotations:
- gui-x: -300
- gui-y: 250
- ng-ui:
- charm: ./ng-ui/osm-ng-ui.charm
- scale: 1
- resources:
- image: opensourcemano/ng-ui:testing-daily
- series: kubernetes
- annotations:
- gui-x: 600
- gui-y: 0
- lcm:
- charm: ./lcm/osm-lcm.charm
- scale: 1
- resources:
- image: opensourcemano/lcm:testing-daily
- series: kubernetes
- options:
- database_commonkey: osm
- log_level: DEBUG
- annotations:
- gui-x: -300
- gui-y: 0
- mon:
- charm: ./mon/osm-mon.charm
- scale: 1
- resources:
- image: opensourcemano/mon:testing-daily
- series: kubernetes
- options:
- database_commonkey: osm
- log_level: DEBUG
- keystone_enabled: true
- annotations:
- gui-x: 300
- gui-y: 0
- pol:
- charm: ./pol/osm-pol.charm
- scale: 1
- resources:
- image: opensourcemano/pol:testing-daily
- series: kubernetes
- options:
- log_level: DEBUG
- annotations:
- gui-x: -300
- gui-y: 500
- pla:
- charm: ./pla/osm-pla.charm
- scale: 1
- resources:
- image: opensourcemano/pla:testing-daily
- series: kubernetes
- options:
- log_level: DEBUG
- annotations:
- gui-x: 600
- gui-y: -250
- prometheus:
- charm: osm-prometheus
- channel: latest/edge
- scale: 1
- series: kubernetes
- storage:
- data: 50M
- options:
- default-target: "mon:8000"
- annotations:
- gui-x: 300
- gui-y: 250
- grafana:
- charm: osm-grafana
- channel: latest/edge
- scale: 1
- series: kubernetes
- annotations:
- gui-x: 300
- gui-y: 500
- keystone:
- charm: osm-keystone
- channel: latest/edge
- resources:
- keystone-image: opensourcemano/keystone:testing-daily
- scale: 1
- annotations:
- gui-x: 300
- gui-y: -250
-relations:
- - - grafana:prometheus
- - prometheus:prometheus
- - - kafka:zookeeper
- - zookeeper:zookeeper
- - - keystone:db
- - mariadb:mysql
- - - lcm:kafka
- - kafka:kafka
- - - lcm:mongodb
- - mongodb:database
- - - ro:ro
- - lcm:ro
- - - ro:kafka
- - kafka:kafka
- - - ro:mongodb
- - mongodb:database
- - - pol:kafka
- - kafka:kafka
- - - pol:mongodb
- - mongodb:database
- - - mon:mongodb
- - mongodb:database
- - - mon:kafka
- - kafka:kafka
- - - pla:kafka
- - kafka:kafka
- - - pla:mongodb
- - mongodb:database
- - - nbi:mongodb
- - mongodb:database
- - - nbi:kafka
- - kafka:kafka
- - - nbi:prometheus
- - prometheus:prometheus
- - - nbi:keystone
- - keystone:keystone
- - - mon:prometheus
- - prometheus:prometheus
- - - ng-ui:nbi
- - nbi:nbi
- - - mon:keystone
- - keystone:keystone
- - - mariadb:mysql
- - pol:mysql
- - - grafana:db
- - mariadb:mysql
diff --git a/installers/charm/local_osm_bundle_proxy.yaml b/installers/charm/local_osm_bundle_proxy.yaml
deleted file mode 100644
index d3285224..00000000
--- a/installers/charm/local_osm_bundle_proxy.yaml
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright 2020 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-description: Single instance OSM bundle
-bundle: kubernetes
-variables:
- proxy: &proxy http://91.189.89.11:3128
- no-proxy: &no_proxy 127.0.0.1,localhost,::1,10.131.15.1/24,10.152.183.0/24,10.1.0.0/16
-applications:
- zookeeper-k8s:
- charm: "cs:~charmed-osm/zookeeper-k8s"
- channel: "stable"
- scale: 1
- series: kubernetes
- storage:
- database: 100M
- annotations:
- gui-x: 0
- gui-y: 550
- mariadb-k8s:
- charm: "cs:~charmed-osm/mariadb-k8s"
- channel: "stable"
- scale: 1
- series: kubernetes
- storage:
- database: 50M
- options:
- password: manopw
- root_password: osm4u
- user: mano
- annotations:
- gui-x: -250
- gui-y: -200
- kafka-k8s:
- charm: "cs:~charmed-osm/kafka-k8s"
- channel: "stable"
- scale: 1
- series: kubernetes
- storage:
- database: 100M
- annotations:
- gui-x: 0
- gui-y: 300
- mongodb-k8s:
- charm: "cs:~charmed-osm/mongodb-k8s"
- channel: "stable"
- scale: 1
- series: kubernetes
- storage:
- database: 50M
- options:
- replica-set: rs0
- namespace: osm
- enable-sidecar: true
- annotations:
- gui-x: 0
- gui-y: 50
- nbi:
- charm: "./nbi/build"
- scale: 1
- series: kubernetes
- options:
- database_commonkey: osm
- auth_backend: keystone
- annotations:
- gui-x: 0
- gui-y: -200
- ro:
- charm: "./ro/build"
- scale: 1
- series: kubernetes
- annotations:
- gui-x: -250
- gui-y: 300
- ng-ui:
- charm: "./ng-ui/build"
- scale: 1
- series: kubernetes
- annotations:
- gui-x: 500
- gui-y: 100
- lcm:
- charm: "./lcm/build"
- scale: 1
- series: kubernetes
- options:
- database_commonkey: osm
- vca_model_config_no_proxy: *no_proxy
- vca_model_config_juju_no_proxy: *no_proxy
- vca_model_config_apt_no_proxy: *no_proxy
- vca_model_config_juju_http_proxy: *proxy
- vca_model_config_juju_https_proxy: *proxy
- vca_model_config_apt_http_proxy: *proxy
- vca_model_config_apt_https_proxy: *proxy
- vca_model_config_snap_http_proxy: *proxy
- vca_model_config_snap_https_proxy: *proxy
- annotations:
- gui-x: -250
- gui-y: 50
- mon:
- charm: "./mon/build"
- scale: 1
- series: kubernetes
- options:
- database_commonkey: osm
- annotations:
- gui-x: 250
- gui-y: 50
- pol:
- charm: "./pol/build"
- scale: 1
- series: kubernetes
- annotations:
- gui-x: -250
- gui-y: 550
- pla:
- charm: "./pla/build"
- scale: 1
- series: kubernetes
- annotations:
- gui-x: 500
- gui-y: -200
- prometheus:
- charm: "./prometheus/build"
- channel: "stable"
- scale: 1
- series: kubernetes
- storage:
- data: 50M
- options:
- default-target: "mon:8000"
- annotations:
- gui-x: 250
- gui-y: 300
- grafana:
- charm: "./grafana/build"
- channel: "stable"
- scale: 1
- series: kubernetes
- annotations:
- gui-x: 250
- gui-y: 550
- keystone:
- charm: "./keystone/build"
- scale: 1
- series: kubernetes
- annotations:
- gui-x: -250
- gui-y: 550
-relations:
- - - grafana:prometheus
- - prometheus:prometheus
- - - kafka-k8s:zookeeper
- - zookeeper-k8s:zookeeper
- - - keystone:db
- - mariadb-k8s:mysql
- - - lcm:kafka
- - kafka-k8s:kafka
- - - lcm:mongodb
- - mongodb-k8s:mongo
- - - ro:ro
- - lcm:ro
- - - ro:kafka
- - kafka-k8s:kafka
- - - ro:mongodb
- - mongodb-k8s:mongo
- - - pol:kafka
- - kafka-k8s:kafka
- - - pol:mongodb
- - mongodb-k8s:mongo
- - - mon:mongodb
- - mongodb-k8s:mongo
- - - mon:kafka
- - kafka-k8s:kafka
- - - pla:kafka
- - kafka-k8s:kafka
- - - pla:mongodb
- - mongodb-k8s:mongo
- - - nbi:mongodb
- - mongodb-k8s:mongo
- - - nbi:kafka
- - kafka-k8s:kafka
- - - nbi:prometheus
- - prometheus:prometheus
- - - nbi:keystone
- - keystone:keystone
- - - mon:prometheus
- - prometheus:prometheus
- - - ng-ui:nbi
- - nbi:nbi
diff --git a/installers/charm/local_osm_ha_bundle.yaml b/installers/charm/local_osm_ha_bundle.yaml
deleted file mode 100644
index 79950cad..00000000
--- a/installers/charm/local_osm_ha_bundle.yaml
+++ /dev/null
@@ -1,216 +0,0 @@
-# Copyright 2020 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-name: osm-ha
-bundle: kubernetes
-description: Local bundle for development (HA)
-applications:
- zookeeper:
- charm: zookeeper-k8s
- channel: latest/edge
- scale: 3
- storage:
- data: 100M
- annotations:
- gui-x: 0
- gui-y: 500
- mariadb:
- charm: charmed-osm-mariadb-k8s
- scale: 3
- series: kubernetes
- storage:
- database: 300M
- options:
- password: manopw
- root_password: osm4u
- user: mano
- ha-mode: true
- annotations:
- gui-x: -300
- gui-y: -250
- kafka:
- charm: kafka-k8s
- channel: latest/edge
- scale: 3
- trust: true
- storage:
- data: 100M
- annotations:
- gui-x: 0
- gui-y: 250
- mongodb:
- charm: mongodb-k8s
- channel: latest/stable
- scale: 3
- series: kubernetes
- storage:
- db: 50M
- annotations:
- gui-x: 0
- gui-y: 0
- nbi:
- charm: ./nbi/osm-nbi.charm
- scale: 3
- resources:
- image: opensourcemano/nbi:testing-daily
- series: kubernetes
- options:
- database_commonkey: osm
- auth_backend: keystone
- log_level: DEBUG
- annotations:
- gui-x: 0
- gui-y: -250
- ro:
- charm: ./ro/osm-ro.charm
- scale: 3
- resources:
- image: opensourcemano/ro:testing-daily
- series: kubernetes
- options:
- log_level: DEBUG
- annotations:
- gui-x: -300
- gui-y: 250
- ng-ui:
- charm: ./ng-ui/osm-ng-ui.charm
- scale: 3
- resources:
- image: opensourcemano/ng-ui:testing-daily
- series: kubernetes
- annotations:
- gui-x: 600
- gui-y: 0
- lcm:
- charm: ./lcm/osm-lcm.charm
- scale: 3
- resources:
- image: opensourcemano/lcm:testing-daily
- series: kubernetes
- options:
- database_commonkey: osm
- log_level: DEBUG
- annotations:
- gui-x: -300
- gui-y: 0
- mon:
- charm: ./mon/osm-mon.charm
- scale: 3
- resources:
- image: opensourcemano/mon:testing-daily
- series: kubernetes
- options:
- database_commonkey: osm
- log_level: DEBUG
- keystone_enabled: true
- annotations:
- gui-x: 300
- gui-y: 0
- pol:
- charm: ./pol/osm-pol.charm
- scale: 3
- resources:
- image: opensourcemano/pol:testing-daily
- series: kubernetes
- options:
- log_level: DEBUG
- annotations:
- gui-x: -300
- gui-y: 500
- pla:
- charm: ./pla/osm-pla.charm
- scale: 3
- resources:
- image: opensourcemano/pla:testing-daily
- series: kubernetes
- options:
- log_level: DEBUG
- annotations:
- gui-x: 600
- gui-y: -250
- prometheus:
- charm: osm-prometheus
- channel: latest/edge
- scale: 3
- series: kubernetes
- storage:
- data: 50M
- options:
- default-target: "mon:8000"
- annotations:
- gui-x: 300
- gui-y: 250
- grafana:
- charm: osm-grafana
- channel: latest/edge
- scale: 3
- series: kubernetes
- annotations:
- gui-x: 300
- gui-y: 500
- keystone:
- charm: osm-keystone
- channel: latest/edge
- resources:
- keystone-image: opensourcemano/keystone:testing-daily
- scale: 1
- annotations:
- gui-x: 300
- gui-y: -250
-relations:
- - - grafana:prometheus
- - prometheus:prometheus
- - - kafka:zookeeper
- - zookeeper:zookeeper
- - - keystone:db
- - mariadb:mysql
- - - lcm:kafka
- - kafka:kafka
- - - lcm:mongodb
- - mongodb:database
- - - ro:ro
- - lcm:ro
- - - ro:kafka
- - kafka:kafka
- - - ro:mongodb
- - mongodb:database
- - - pol:kafka
- - kafka:kafka
- - - pol:mongodb
- - mongodb:database
- - - mon:mongodb
- - mongodb:database
- - - mon:kafka
- - kafka:kafka
- - - pla:kafka
- - kafka:kafka
- - - pla:mongodb
- - mongodb:database
- - - nbi:mongodb
- - mongodb:database
- - - nbi:kafka
- - kafka:kafka
- - - nbi:prometheus
- - prometheus:prometheus
- - - nbi:keystone
- - keystone:keystone
- - - mon:prometheus
- - prometheus:prometheus
- - - ng-ui:nbi
- - nbi:nbi
- - - mon:keystone
- - keystone:keystone
- - - mariadb:mysql
- - pol:mysql
- - - grafana:db
- - mariadb:mysql
diff --git a/installers/charm/mariadb-k8s/.gitignore b/installers/charm/mariadb-k8s/.gitignore
deleted file mode 100644
index 712eb963..00000000
--- a/installers/charm/mariadb-k8s/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-release/
-__pycache__
-.tox
diff --git a/installers/charm/mariadb-k8s/.yamllint.yaml b/installers/charm/mariadb-k8s/.yamllint.yaml
deleted file mode 100644
index 567eb5fe..00000000
--- a/installers/charm/mariadb-k8s/.yamllint.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
----
-
-extends: default
-
-yaml-files:
- - '*.yaml'
- - '*.yml'
- - '.yamllint'
-ignore: |
- reactive/
- .tox
- release/
diff --git a/installers/charm/mariadb-k8s/README.md b/installers/charm/mariadb-k8s/README.md
deleted file mode 100755
index 5c89de18..00000000
--- a/installers/charm/mariadb-k8s/README.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-# MariaDB Operator
-
-A Juju charm deploying and managing MariaDB on Kubernetes.
-
-## Overview
-
-MariaDB turns data into structured information in a wide array of
-applications, ranging from banking to websites. Originally designed as
-an enhanced, drop-in replacement for MySQL, MariaDB is used because it is fast,
-scalable and robust, with a rich ecosystem of storage engines, plugins and
-many other tools that make it very versatile for a wide variety of use cases.
-
-MariaDB is developed as open source software and as a relational database it
-provides an SQL interface for accessing data. The latest versions of MariaDB
-also include GIS and JSON features.
-
-More information can be found in [the MariaDB Knowledge Base](https://mariadb.com/kb/en/documentation/).
-
-## Usage
-
-For details on using Kubernetes with Juju [see here](https://juju.is/docs/kubernetes), and for
-details on using Juju with MicroK8s for easy local testing [see here](https://juju.is/docs/microk8s-cloud).
-
-To deploy the charm into a Juju Kubernetes model:
-
-    juju deploy cs:~charmed-osm/mariadb-k8s
-
-The charm can then be easily related to an application that supports the mysql
-relation, such as:
-
- juju deploy cs:~charmed-osm/keystone
- juju relate keystone mariadb-k8s
-
-Once the "Workload" status of both mariadb-k8s and keystone is "active", using
-the "Application" IP of keystone (from `juju status`):
-
-    # Change as appropriate for your juju model
- KEYSTONE_APPLICATION_IP=10.152.183.222
- curl -i -H "Content-Type: application/json" -d '
- { "auth": {
- "identity": {
- "methods": ["password"],
- "password": {
- "user": {
- "name": "admin",
- "domain": { "id": "default" },
- "password": "admin"
- }
- }
- }
- }
- ' "http://${KEYSTONE_APPLICATION_IP}:5000/v3/auth/tokens" ; echo
-
-This will create a token that you could use to query Keystone.
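The token is returned by Keystone in the `X-Subject-Token` response header and is passed back as `X-Auth-Token` on later requests. A minimal Python sketch of that follow-up step, reusing the illustrative IP and admin/admin credentials from the example above (the `requests` library is an assumption):

```python
# Illustrative only: same Keystone endpoint and credentials as the curl example.
import requests

KEYSTONE_URL = "http://10.152.183.222:5000/v3"  # change for your juju model

auth_body = {
    "auth": {
        "identity": {
            "methods": ["password"],
            "password": {
                "user": {
                    "name": "admin",
                    "domain": {"id": "default"},
                    "password": "admin",
                }
            },
        }
    }
}

# Keystone returns the issued token in the X-Subject-Token response header.
resp = requests.post(f"{KEYSTONE_URL}/auth/tokens", json=auth_body)
resp.raise_for_status()
token = resp.headers["X-Subject-Token"]

# Send the token as X-Auth-Token, e.g. to list the projects this user can scope to.
projects = requests.get(f"{KEYSTONE_URL}/auth/projects", headers={"X-Auth-Token": token})
print(projects.status_code, projects.json())
```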
-
----
-
-For more details, [see here](https://charmhub.io/mariadb/docs/).
diff --git a/installers/charm/mariadb-k8s/actions.yaml b/installers/charm/mariadb-k8s/actions.yaml
deleted file mode 100644
index 0b33b6ab..00000000
--- a/installers/charm/mariadb-k8s/actions.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-backup:
- description: "Perform a backup"
- params:
- path:
- description: "Path for the backup inside the unit"
- type: string
- default: "/var/lib/mysql"
-restore:
- description: "Restore from a backup"
- params:
- path:
- description: "Path for the backup inside the unit"
- type: string
- default: "/var/lib/mysql"
-remove-backup:
- description: "Remove backup from unit"
- params:
- path:
- description: "Path for the backup inside the unit"
- type: string
- default: "/var/lib/mysql"
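These actions can also be driven from Python with the zaza helpers used by the functional tests further below. A minimal sketch, assuming a deployed unit named `mariadb-k8s/0` and relying on the default backup path declared above:

```python
# Sketch only: assumes a unit named mariadb-k8s/0 and the default
# backup path (/var/lib/mysql) from actions.yaml.
import zaza.model as model

UNIT_NAME = "mariadb-k8s/0"

# Take a backup; raise_on_failure surfaces a failed action as an exception.
backup = model.run_action(UNIT_NAME, "backup", raise_on_failure=True)
print(backup.status)

# Restore from that backup, then remove the backup file from the unit.
model.run_action(UNIT_NAME, "restore", raise_on_failure=True)
model.run_action(UNIT_NAME, "remove-backup", raise_on_failure=True)
```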
diff --git a/installers/charm/mariadb-k8s/actions/backup b/installers/charm/mariadb-k8s/actions/backup
deleted file mode 100755
index 7bfb5e4c..00000000
--- a/installers/charm/mariadb-k8s/actions/backup
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-set -o pipefail  # a failed mysqldump must not be masked by gzip's exit status below
-DB_BACKUP_PATH=`action-get path`
-mkdir -p $DB_BACKUP_PATH
-ROOT_PASSWORD=`config-get root_password`
-mysqldump -u root -p$ROOT_PASSWORD --single-transaction --all-databases | gzip > $DB_BACKUP_PATH/backup.sql.gz || action-fail "Backup failed"
-action-set copy.cmd="kubectl cp $JUJU_MODEL_NAME/$HOSTNAME:$DB_BACKUP_PATH/backup.sql.gz backup.sql.gz"
-action-set restore.cmd="kubectl cp backup.sql.gz $JUJU_MODEL_NAME/$HOSTNAME:$DB_BACKUP_PATH/backup.sql.gz"
-action-set restore.juju="juju run-action $JUJU_UNIT_NAME restore --wait"
-
diff --git a/installers/charm/mariadb-k8s/actions/remove-backup b/installers/charm/mariadb-k8s/actions/remove-backup
deleted file mode 100755
index f3043337..00000000
--- a/installers/charm/mariadb-k8s/actions/remove-backup
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-DB_BACKUP_PATH=`action-get path`
-rm $DB_BACKUP_PATH/backup.sql.gz || exit
-echo Backup successfully removed!
diff --git a/installers/charm/mariadb-k8s/actions/restore b/installers/charm/mariadb-k8s/actions/restore
deleted file mode 100755
index 768e68e1..00000000
--- a/installers/charm/mariadb-k8s/actions/restore
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-DB_BACKUP_PATH=`action-get path`
-ROOT_PASSWORD=`config-get root_password`
-gunzip -c $DB_BACKUP_PATH/backup.sql.gz | mysql -uroot -p$ROOT_PASSWORD || action-fail "Restore failed"
-action-set message="Backup restored successfully"
\ No newline at end of file
diff --git a/installers/charm/mariadb-k8s/charmcraft.yaml b/installers/charm/mariadb-k8s/charmcraft.yaml
deleted file mode 100644
index 69a510cb..00000000
--- a/installers/charm/mariadb-k8s/charmcraft.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-type: "charm"
-bases:
- - build-on:
- - name: "ubuntu"
- channel: "20.04"
- run-on:
- - name: "ubuntu"
- channel: "20.04"
-parts:
- charm:
- source: .
- plugin: reactive
- build-snaps: [charm]
diff --git a/installers/charm/mariadb-k8s/config.yaml b/installers/charm/mariadb-k8s/config.yaml
deleted file mode 100755
index 8a606a4c..00000000
--- a/installers/charm/mariadb-k8s/config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-options:
- user:
- type: string
- description: 'The database user name.'
- default: 'mysql'
- password:
- type: string
- description: 'The database user password.'
- default: 'password'
- database:
- type: string
- description: 'The database name.'
- default: 'database'
- root_password:
- type: string
- description: 'The database root password.'
- default: 'root'
- mysql_port:
- type: string
- description: 'The mysql port'
- default: '3306'
- query-cache-type:
- default: "OFF"
- type: string
- description: "Query cache is usually a good idea, \
- but can hurt concurrency. \
- Valid values are \"OFF\", \"ON\", or \"DEMAND\"."
- query-cache-size:
- default: !!int "0"
- type: int
- description: "Override the computed version from dataset-size. \
- Still works if query-cache-type is \"OFF\" since sessions \
- can override the cache type setting on their own."
- ha-mode:
- type: boolean
- description: Indicates if the charm should have the capabilities to scale
- default: false
- image:
- type: string
-    description: OCI image used when ha-mode is false
- default: rocks.canonical.com:443/mariadb/server:10.3
- ha-image:
- type: string
-    description: OCI image used when ha-mode is true (Galera)
- default: rocks.canonical.com:443/canonicalosm/galera-mysql:latest
diff --git a/installers/charm/mariadb-k8s/icon.svg b/installers/charm/mariadb-k8s/icon.svg
deleted file mode 100644
index 69b42ee0..00000000
--- a/installers/charm/mariadb-k8s/icon.svg
+++ /dev/null
@@ -1,345 +0,0 @@
-
-
-
-
diff --git a/installers/charm/mariadb-k8s/layer.yaml b/installers/charm/mariadb-k8s/layer.yaml
deleted file mode 100644
index f9b5dd94..00000000
--- a/installers/charm/mariadb-k8s/layer.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-includes:
- - "layer:caas-base"
- - 'layer:status'
- - 'layer:leadership'
- - "layer:osm-common"
- - 'interface:juju-relation-mysql'
-
-repo: https://github.com/wallyworld/caas.git
diff --git a/installers/charm/mariadb-k8s/metadata.yaml b/installers/charm/mariadb-k8s/metadata.yaml
deleted file mode 100755
index a8021153..00000000
--- a/installers/charm/mariadb-k8s/metadata.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-name: osm-mariadb
-summary: MariaDB is a popular database server made by the developers of MySQL.
-# docs: https://discourse.charmhub.io/t/mariadb-documentation-overview/4116
-maintainers:
- - OSM Charmers
-description: |
- MariaDB Server is one of the most popular database servers in the world.
- It's made by the original developers of MySQL and guaranteed to stay open
- source. Notable users include Wikipedia, WordPress.com and Google.
- https://mariadb.org/
-tags:
- - database
- - openstack
-provides:
- mysql:
- interface: mysql
-series:
- - kubernetes
-storage:
- database:
- type: filesystem
- location: /var/lib/mysql
-deployment:
- type: stateful
- service: cluster
diff --git a/installers/charm/mariadb-k8s/reactive/osm_mariadb.py b/installers/charm/mariadb-k8s/reactive/osm_mariadb.py
deleted file mode 100644
index 4eedcfbc..00000000
--- a/installers/charm/mariadb-k8s/reactive/osm_mariadb.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-from charms.layer.caas_base import pod_spec_set
-from charms.reactive import when, when_not, hook
-from charms.reactive import endpoint_from_flag
-from charms.reactive.flags import set_flag, get_state, clear_flag
-
-from charmhelpers.core.hookenv import (
- log,
- metadata,
- config,
- application_name,
-)
-from charms import layer
-from charms.osm.k8s import is_pod_up, get_service_ip
-
-
-@hook("upgrade-charm")
-@when("leadership.is_leader")
-def upgrade():
- clear_flag("mariadb-k8s.configured")
-
-
-@when("config.changed")
-@when("leadership.is_leader")
-def restart():
- clear_flag("mariadb-k8s.configured")
-
-
-@when_not("mariadb-k8s.configured")
-@when("leadership.is_leader")
-def configure():
- layer.status.maintenance("Configuring mariadb-k8s container")
-
- spec = make_pod_spec()
- log("set pod spec:\n{}".format(spec))
- pod_spec_set(spec)
-
- set_flag("mariadb-k8s.configured")
-
-
-@when("mariadb-k8s.configured")
-def set_mariadb_active():
- layer.status.active("ready")
-
-
-@when_not("leadership.is_leader")
-def non_leaders_active():
- layer.status.active("ready")
-
-
-@when("mariadb-k8s.configured", "mysql.database.requested")
-def provide_database():
- mysql = endpoint_from_flag("mysql.database.requested")
-
- if not is_pod_up("mysql"):
- log("The pod is not ready.")
- return
-
- for request, application in mysql.database_requests().items():
- try:
-
- log("request -> {0} for app -> {1}".format(request, application))
- user = get_state("user")
- password = get_state("password")
- database_name = get_state("database")
- root_password = get_state("root_password")
-
- log("db params: {0}:{1}@{2}".format(user, password, database_name))
-
- service_ip = get_service_ip("mysql")
- if service_ip:
- mysql.provide_database(
- request_id=request,
- host=service_ip,
- port=3306,
- database_name=database_name,
- user=user,
- password=password,
- root_password=root_password,
- )
- mysql.mark_complete()
- except Exception as e:
- log("Exception while providing database: {}".format(e))
-
-
-def make_pod_spec():
- """Make pod specification for Kubernetes
-
- Returns:
- pod_spec: Pod specification for Kubernetes
- """
- if config().get("ha-mode"):
- with open("reactive/spec_template_ha.yaml") as spec_file:
- pod_spec_template = spec_file.read()
- image = config().get("ha-image")
- else:
- with open("reactive/spec_template.yaml") as spec_file:
- pod_spec_template = spec_file.read()
- image = config().get("image")
-
- md = metadata()
- cfg = config()
-
- user = cfg.get("user")
- password = cfg.get("password")
- database = cfg.get("database")
- root_password = cfg.get("root_password")
- app_name = application_name()
-
- set_flag("user", user)
- set_flag("password", password)
- set_flag("database", database)
- set_flag("root_password", root_password)
-
- data = {
- "name": md.get("name"),
- "docker_image": image,
- "application_name": app_name,
- }
- data.update(cfg)
- return pod_spec_template % data
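The spec templates that follow are plain printf-style templates: each `%(key)s` placeholder is filled from the merged metadata/config dictionary by the final `pod_spec_template % data` expression before the result is handed to `pod_spec_set()`. A self-contained sketch of that mechanism, using a trimmed template fragment and the charm's default config values for illustration:

```python
# Standalone illustration of the %(key)s substitution used by make_pod_spec();
# the fragment and values mirror spec_template.yaml and config.yaml defaults.
template = """\
version: 2
containers:
  - name: %(name)s
    image: %(docker_image)s
    ports:
      - containerPort: %(mysql_port)s
        protocol: TCP
        name: main
"""

data = {
    "name": "osm-mariadb",
    "docker_image": "rocks.canonical.com:443/mariadb/server:10.3",
    "mysql_port": "3306",
}

# This rendered string is what the charm passes to pod_spec_set().
print(template % data)
```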
diff --git a/installers/charm/mariadb-k8s/reactive/spec_template.yaml b/installers/charm/mariadb-k8s/reactive/spec_template.yaml
deleted file mode 100644
index 0a1faccb..00000000
--- a/installers/charm/mariadb-k8s/reactive/spec_template.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-version: 2
-containers:
- - name: %(name)s
- image: %(docker_image)s
- ports:
- - containerPort: %(mysql_port)s
- protocol: TCP
- name: main
- config:
- MARIADB_ROOT_PASSWORD: %(root_password)s
- MARIADB_USER: %(user)s
- MARIADB_PASSWORD: %(password)s
- MARIADB_DATABASE: %(database)s
- kubernetes:
- readinessProbe:
- tcpSocket:
- port: %(mysql_port)s
- initialDelaySeconds: 10
- periodSeconds: 10
- timeoutSeconds: 5
- successThreshold: 1
- failureThreshold: 3
- livenessProbe:
- tcpSocket:
- port: %(mysql_port)s
- initialDelaySeconds: 120
- periodSeconds: 10
- timeoutSeconds: 5
- successThreshold: 1
- failureThreshold: 3
diff --git a/installers/charm/mariadb-k8s/reactive/spec_template_ha.yaml b/installers/charm/mariadb-k8s/reactive/spec_template_ha.yaml
deleted file mode 100644
index f5ebf20a..00000000
--- a/installers/charm/mariadb-k8s/reactive/spec_template_ha.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-version: 2
-service:
- scalePolicy: serial
- annotations:
- service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
-containers:
- - name: %(name)s
- image: %(docker_image)s
- kubernetes:
- readinessProbe:
- tcpSocket:
- port: %(mysql_port)s
- initialDelaySeconds: 10
- periodSeconds: 10
- timeoutSeconds: 5
- successThreshold: 1
- failureThreshold: 3
- livenessProbe:
- exec:
- command: ["bash", "-c", "mysql -uroot -p\"${MYSQL_ROOT_PASSWORD}\" -e 'show databases;'"]
- initialDelaySeconds: 120
- periodSeconds: 10
- timeoutSeconds: 5
- successThreshold: 1
- failureThreshold: 3
- ports:
- - containerPort: %(mysql_port)s
- protocol: TCP
- name: main
- - containerPort: 4444
- name: sst
- - containerPort: 4567
- name: replication
- - containerPort: 4568
- name: ist
- config:
- MYSQL_ROOT_PASSWORD: %(root_password)s
- APPLICATION_NAME: %(application_name)s
- MYSQL_USER: %(user)s
- MYSQL_PASSWORD: %(password)s
- MYSQL_DATABASE: %(database)s
- files:
- - name: configurations
- mountPath: /etc/mysqlconfiguration
- files:
- galera.cnf: |
- [galera]
- user = mysql
- bind-address = 0.0.0.0
-
- default_storage_engine = InnoDB
- binlog_format = ROW
- innodb_autoinc_lock_mode = 2
- innodb_flush_log_at_trx_commit = 0
- query_cache_size = 0
- host_cache_size = 0
- query_cache_type = 0
-
- # MariaDB Galera settings
- wsrep_on=ON
- wsrep_provider=/usr/lib/galera/libgalera_smm.so
- wsrep_sst_method=rsync
-
- # Cluster settings (automatically updated)
- wsrep_cluster_address=gcomm://
-          wsrep_cluster_name=vimdb_cluster
- wsrep_node_address=127.0.0.1
- mariadb.cnf: |
- [client]
- default-character-set = utf8
- [mysqld]
- character-set-server = utf8
- collation-server = utf8_general_ci
- plugin_load_add = feedbackx#
- # InnoDB tuning
- innodb_log_file_size = 50M
diff --git a/installers/charm/mariadb-k8s/test-requirements.txt b/installers/charm/mariadb-k8s/test-requirements.txt
deleted file mode 100644
index 04f2d768..00000000
--- a/installers/charm/mariadb-k8s/test-requirements.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-git+https://github.com/davigar15/zaza.git#egg=zaza
-mysql.connector
\ No newline at end of file
diff --git a/installers/charm/mariadb-k8s/tests/basic_deployment.py b/installers/charm/mariadb-k8s/tests/basic_deployment.py
deleted file mode 100644
index fd6520fe..00000000
--- a/installers/charm/mariadb-k8s/tests/basic_deployment.py
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-import unittest
-import zaza.model as model
-
-import mysql.connector as mysql
-
-# from mysql.connector import errorcode
-
-APPLICATION_NAME = "mariadb-k8s"
-UNIT_NAME = "mariadb-k8s/0"
-ROOT_USER = "root"
-ROOT_PASSWORD = "osm4u"
-USER = "mano"
-PASSWORD = "manopw"
-ACTION_SUCCESS_STATUS = "completed"
-
-
-def create_database(cnx, database_name):
- try:
- if not database_exists(cnx, database_name):
- cursor = cnx.cursor()
- cursor.execute(
- "CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(database_name)
- )
- return database_exists(cnx, database_name)
- else:
- return True
- except mysql.Error as err:
- print("Failed creating database {}: {}".format(database_name, err))
-
-
-def delete_database(cnx, database_name):
- try:
- if database_exists(cnx, database_name):
- cursor = cnx.cursor()
- cursor.execute("DROP DATABASE {}".format(database_name))
- return not database_exists(cnx, database_name)
- else:
- return True
- except mysql.Error as err:
- print("Failed deleting database {}: {}".format(database_name, err))
-
-
-def database_exists(cnx, database_name):
- try:
- cursor = cnx.cursor()
- cursor.execute("SHOW DATABASES")
- databases = cursor.fetchall()
- exists = False
- for database in databases:
- if database[0] == database_name:
- exists = True
- cursor.close()
- return exists
- except mysql.Error as err:
-        print("Failed checking database {}: {}".format(database_name, err))
- return False
-
-
-class BasicDeployment(unittest.TestCase):
- def setUp(self):
- super().setUp()
- self.ip = model.get_status().applications[APPLICATION_NAME]["public-address"]
- try:
- self.cnx = mysql.connect(
- user=ROOT_USER, password=ROOT_PASSWORD, host=self.ip
- )
- except mysql.Error as err:
- print("Couldn't connect to mariadb-k8s : {}".format(err))
-
- def tearDown(self):
- super().tearDown()
- self.cnx.close()
-
- def test_mariadb_connection_root(self):
- pass
-
- def test_mariadb_connection_user(self):
- try:
- cnx = mysql.connect(user=USER, password=PASSWORD, host=self.ip)
- cnx.close()
- except mysql.Error as err:
- print("Couldn't connect to mariadb-k8s with user creds: {}".format(err))
-
- def test_mariadb_create_database(self):
- created = create_database(self.cnx, "test_database")
-        self.assertTrue(created)
-
- def test_mariadb_backup_action(self, db_name="test_backup"):
- created = create_database(self.cnx, db_name)
-        self.assertTrue(created)
- try:
- action = model.run_action(UNIT_NAME, "backup", raise_on_failure=True)
- self.assertEqual(action.status, ACTION_SUCCESS_STATUS)
- except model.ActionFailed as err:
- print("Action failed: {}".format(err))
-
- def test_mariadb_remove_backup_action(self):
- self.test_mariadb_backup_action(db_name="test_remove_backup")
- try:
- action = model.run_action(UNIT_NAME, "remove-backup", raise_on_failure=True)
- self.assertEqual(action.status, ACTION_SUCCESS_STATUS)
- except model.ActionFailed as err:
- print("Action failed: {}".format(err))
-
- def test_mariadb_restore_action(self):
- self.test_mariadb_backup_action(db_name="test_restore")
- deleted = delete_database(self.cnx, "test_restore")
-        self.assertTrue(deleted)
- try:
- action = model.run_action(UNIT_NAME, "restore", raise_on_failure=True)
- self.assertEqual(action.status, "completed")
- self.assertTrue(database_exists(self.cnx, "test_restore"))
- except model.ActionFailed as err:
- print("Action failed: {}".format(err))
diff --git a/installers/charm/mariadb-k8s/tests/bundles/mariadb-ha.yaml b/installers/charm/mariadb-k8s/tests/bundles/mariadb-ha.yaml
deleted file mode 100644
index 7692bd53..00000000
--- a/installers/charm/mariadb-k8s/tests/bundles/mariadb-ha.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-bundle: kubernetes
-applications:
- mariadb-k8s:
- charm: '../../release/'
- scale: 2
- options:
- password: manopw
- root_password: osm4u
- user: mano
- database: database
- mysql_port: "3306"
- query-cache-type: "OFF"
- query-cache-size: 0
- ha-mode: true
- image: 'rocks.canonical.com:443/canonicalosm/galera-mysql:latest'
- series: kubernetes
- storage:
- database: 50M
diff --git a/installers/charm/mariadb-k8s/tests/bundles/mariadb.yaml b/installers/charm/mariadb-k8s/tests/bundles/mariadb.yaml
deleted file mode 100644
index e3e3aa31..00000000
--- a/installers/charm/mariadb-k8s/tests/bundles/mariadb.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-bundle: kubernetes
-applications:
- mariadb-k8s:
- charm: '../../release/'
- scale: 1
- options:
- password: manopw
- root_password: osm4u
- user: mano
- database: database
- mysql_port: "3306"
- query-cache-type: "OFF"
- query-cache-size: 0
- ha-mode: false
- series: kubernetes
- storage:
- database: 50M
diff --git a/installers/charm/mariadb-k8s/tests/tests.yaml b/installers/charm/mariadb-k8s/tests/tests.yaml
deleted file mode 100644
index df2b59ce..00000000
--- a/installers/charm/mariadb-k8s/tests/tests.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-gate_bundles:
- - mariadb
- - mariadb-ha
-smoke_bundles:
- - mariadb
-tests:
- - tests.basic_deployment.BasicDeployment
diff --git a/installers/charm/mariadb-k8s/tox.ini b/installers/charm/mariadb-k8s/tox.ini
deleted file mode 100644
index 28d60be9..00000000
--- a/installers/charm/mariadb-k8s/tox.ini
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-[tox]
-envlist = pep8
-skipsdist = True
-
-[testenv]
-setenv = VIRTUAL_ENV={envdir}
- PYTHONHASHSEED=0
-whitelist_externals = juju
-passenv = HOME TERM CS_API_* OS_* AMULET_*
-deps = -r{toxinidir}/test-requirements.txt
-install_command =
- pip install {opts} {packages}
-
-[testenv:build]
-basepython = python3
-passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
-setenv = CHARM_LAYERS_DIR = /tmp
- CHARM_INTERFACES_DIR = /tmp/canonical-osm/charms/interfaces/
-whitelist_externals = git
- charm
- rm
- mv
-commands =
- rm -rf /tmp/canonical-osm /tmp/osm-common
- rm -rf release
- git clone https://git.launchpad.net/canonical-osm /tmp/canonical-osm
- git clone https://git.launchpad.net/charm-osm-common /tmp/osm-common
- charm build . --build-dir /tmp
- mv /tmp/mariadb-k8s/ release/
-
-[testenv:black]
-basepython = python3
-deps =
- black
- yamllint
- flake8
-commands =
- black --check --diff .
- yamllint .
- flake8 reactive/ --max-line-length=88
- flake8 tests/ --max-line-length=88
-
-[testenv:pep8]
-basepython = python3
-deps=charm-tools
-commands = charm-proof
-
-[testenv:func-noop]
-basepython = python3
-commands =
- true
-
-[testenv:func]
-basepython = python3
-commands = functest-run-suite
-
-
-[testenv:func-smoke]
-basepython = python3
-commands = functest-run-suite --keep-model --smoke
-
-[testenv:venv]
-commands = {posargs}
diff --git a/installers/charm/mongodb-exporter/.gitignore b/installers/charm/mongodb-exporter/.gitignore
deleted file mode 100644
index 2885df27..00000000
--- a/installers/charm/mongodb-exporter/.gitignore
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-venv
-.vscode
-build
-*.charm
-.coverage
-coverage.xml
-.stestr
-cover
-release
\ No newline at end of file
diff --git a/installers/charm/mongodb-exporter/.jujuignore b/installers/charm/mongodb-exporter/.jujuignore
deleted file mode 100644
index 3ae3e7dc..00000000
--- a/installers/charm/mongodb-exporter/.jujuignore
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-venv
-.vscode
-build
-*.charm
-.coverage
-coverage.xml
-.gitignore
-.stestr
-cover
-release
-tests/
-requirements*
-tox.ini
diff --git a/installers/charm/mongodb-exporter/.yamllint.yaml b/installers/charm/mongodb-exporter/.yamllint.yaml
deleted file mode 100644
index d71fb69f..00000000
--- a/installers/charm/mongodb-exporter/.yamllint.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
----
-extends: default
-
-yaml-files:
- - "*.yaml"
- - "*.yml"
- - ".yamllint"
-ignore: |
- .tox
- cover/
- build/
- venv
- release/
diff --git a/installers/charm/mongodb-exporter/README.md b/installers/charm/mongodb-exporter/README.md
deleted file mode 100644
index 84df4c97..00000000
--- a/installers/charm/mongodb-exporter/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-# Prometheus Mongodb Exporter operator Charm for Kubernetes
-
-## Requirements
diff --git a/installers/charm/mongodb-exporter/charmcraft.yaml b/installers/charm/mongodb-exporter/charmcraft.yaml
deleted file mode 100644
index 0a285a9d..00000000
--- a/installers/charm/mongodb-exporter/charmcraft.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-type: charm
-bases:
- - build-on:
- - name: ubuntu
- channel: "20.04"
- architectures: ["amd64"]
- run-on:
- - name: ubuntu
- channel: "20.04"
- architectures:
- - amd64
- - aarch64
- - arm64
-parts:
- charm:
- build-packages: [git]
diff --git a/installers/charm/mongodb-exporter/config.yaml b/installers/charm/mongodb-exporter/config.yaml
deleted file mode 100644
index fe5cd630..00000000
--- a/installers/charm/mongodb-exporter/config.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-options:
- ingress_class:
- type: string
- description: |
- Ingress class name. This is useful for selecting the ingress to be used
- in case there are multiple ingresses in the underlying k8s clusters.
- ingress_whitelist_source_range:
- type: string
- description: |
- A comma-separated list of CIDRs to store in the
- ingress.kubernetes.io/whitelist-source-range annotation.
-
-      This can be used to lock down access to
-      the Mongodb Exporter based on source IP address.
- default: ""
- tls_secret_name:
- type: string
- description: TLS Secret name
- default: ""
- site_url:
- type: string
- description: Ingress URL
- default: ""
- cluster_issuer:
- type: string
- description: Name of the cluster issuer for TLS certificates
- default: ""
- mongodb_uri:
- type: string
- description: MongoDB URI (external database)
- image_pull_policy:
- type: string
- description: |
- ImagePullPolicy configuration for the pod.
- Possible values: always, ifnotpresent, never
- default: always
- security_context:
- description: Enables the security context of the pods
- type: boolean
- default: false
diff --git a/installers/charm/mongodb-exporter/metadata.yaml b/installers/charm/mongodb-exporter/metadata.yaml
deleted file mode 100644
index c3a0b776..00000000
--- a/installers/charm/mongodb-exporter/metadata.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-name: mongodb-exporter-k8s
-summary: OSM Prometheus Mongodb Exporter
-description: |
- A CAAS charm to deploy OSM's Prometheus Mongodb Exporter.
-series:
- - kubernetes
-tags:
- - kubernetes
- - osm
- - prometheus
- - mongodb-exporter
-min-juju-version: 2.8.0
-deployment:
- type: stateless
- service: cluster
-resources:
- image:
- type: oci-image
- description: Image of mongodb-exporter
- upstream-source: "bitnami/mongodb-exporter:0.30.0"
-provides:
- prometheus-scrape:
- interface: prometheus
- grafana-dashboard:
- interface: grafana-dashboard
-requires:
- mongodb:
- interface: mongodb
diff --git a/installers/charm/mongodb-exporter/requirements-test.txt b/installers/charm/mongodb-exporter/requirements-test.txt
deleted file mode 100644
index 316f6d20..00000000
--- a/installers/charm/mongodb-exporter/requirements-test.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-mock==4.0.3
diff --git a/installers/charm/mongodb-exporter/requirements.txt b/installers/charm/mongodb-exporter/requirements.txt
deleted file mode 100644
index 8bb93ad3..00000000
--- a/installers/charm/mongodb-exporter/requirements.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-git+https://github.com/charmed-osm/ops-lib-charmed-osm/@master
diff --git a/installers/charm/mongodb-exporter/src/charm.py b/installers/charm/mongodb-exporter/src/charm.py
deleted file mode 100755
index 0ee127c8..00000000
--- a/installers/charm/mongodb-exporter/src/charm.py
+++ /dev/null
@@ -1,275 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-# pylint: disable=E0213
-
-from ipaddress import ip_network
-import logging
-from pathlib import Path
-from typing import NoReturn, Optional
-from urllib.parse import urlparse
-
-from ops.main import main
-from opslib.osm.charm import CharmedOsmBase, RelationsMissing
-from opslib.osm.interfaces.grafana import GrafanaDashboardTarget
-from opslib.osm.interfaces.mongo import MongoClient
-from opslib.osm.interfaces.prometheus import PrometheusScrapeTarget
-from opslib.osm.pod import (
- ContainerV3Builder,
- IngressResourceV3Builder,
- PodRestartPolicy,
- PodSpecV3Builder,
-)
-from opslib.osm.validator import ModelValidator, validator
-
-
-logger = logging.getLogger(__name__)
-
-PORT = 9216
-
-
-class ConfigModel(ModelValidator):
- site_url: Optional[str]
- cluster_issuer: Optional[str]
- ingress_class: Optional[str]
- ingress_whitelist_source_range: Optional[str]
- tls_secret_name: Optional[str]
- mongodb_uri: Optional[str]
- image_pull_policy: str
- security_context: bool
-
- @validator("site_url")
- def validate_site_url(cls, v):
- if v:
- parsed = urlparse(v)
- if not parsed.scheme.startswith("http"):
- raise ValueError("value must start with http")
- return v
-
- @validator("ingress_whitelist_source_range")
- def validate_ingress_whitelist_source_range(cls, v):
- if v:
- ip_network(v)
- return v
-
- @validator("mongodb_uri")
- def validate_mongodb_uri(cls, v):
- if v and not v.startswith("mongodb://"):
- raise ValueError("mongodb_uri is not properly formed")
- return v
-
- @validator("image_pull_policy")
- def validate_image_pull_policy(cls, v):
- values = {
- "always": "Always",
- "ifnotpresent": "IfNotPresent",
- "never": "Never",
- }
- v = v.lower()
- if v not in values.keys():
- raise ValueError("value must be always, ifnotpresent or never")
- return values[v]
-
-
-class MongodbExporterCharm(CharmedOsmBase):
- def __init__(self, *args) -> NoReturn:
- super().__init__(*args, oci_image="image")
-
-        # Register the MongoDB relation to obtain the connection string
- self.mongodb_client = MongoClient(self, "mongodb")
- self.framework.observe(self.on["mongodb"].relation_changed, self.configure_pod)
- self.framework.observe(self.on["mongodb"].relation_broken, self.configure_pod)
-
- # Register relation to provide a Scraping Target
- self.scrape_target = PrometheusScrapeTarget(self, "prometheus-scrape")
- self.framework.observe(
- self.on["prometheus-scrape"].relation_joined, self._publish_scrape_info
- )
-
-        # Register relation to provide a Dashboard Target
- self.dashboard_target = GrafanaDashboardTarget(self, "grafana-dashboard")
- self.framework.observe(
- self.on["grafana-dashboard"].relation_joined, self._publish_dashboard_info
- )
-
- def _publish_scrape_info(self, event) -> NoReturn:
- """Publishes scraping information for Prometheus.
-
- Args:
- event (EventBase): Prometheus relation event.
- """
- if self.unit.is_leader():
- hostname = (
- urlparse(self.model.config["site_url"]).hostname
- if self.model.config["site_url"]
- else self.model.app.name
- )
- port = str(PORT)
- if self.model.config.get("site_url", "").startswith("https://"):
- port = "443"
- elif self.model.config.get("site_url", "").startswith("http://"):
- port = "80"
-
- self.scrape_target.publish_info(
- hostname=hostname,
- port=port,
- metrics_path="/metrics",
- scrape_interval="30s",
- scrape_timeout="15s",
- )
-
- def _publish_dashboard_info(self, event) -> NoReturn:
- """Publish dashboards for Grafana.
-
- Args:
- event (EventBase): Grafana relation event.
- """
- if self.unit.is_leader():
- self.dashboard_target.publish_info(
- name="osm-mongodb",
- dashboard=Path("templates/mongodb_exporter_dashboard.json").read_text(),
- )
-
- def _check_missing_dependencies(self, config: ConfigModel):
- """Check if there is any relation missing.
-
- Args:
- config (ConfigModel): object with configuration information.
-
- Raises:
-            RelationsMissing: if the mongodb relation is missing.
- """
- missing_relations = []
-
- if not config.mongodb_uri and self.mongodb_client.is_missing_data_in_unit():
- missing_relations.append("mongodb")
-
- if missing_relations:
- raise RelationsMissing(missing_relations)
-
- def build_pod_spec(self, image_info):
- """Build the PodSpec to be used.
-
- Args:
- image_info (str): container image information.
-
- Returns:
- Dict: PodSpec information.
- """
- # Validate config
- config = ConfigModel(**dict(self.config))
-
- if config.mongodb_uri and not self.mongodb_client.is_missing_data_in_unit():
- raise Exception("Mongodb data cannot be provided via config and relation")
-
- # Check relations
- self._check_missing_dependencies(config)
-
- unparsed = (
- config.mongodb_uri
- if config.mongodb_uri
- else self.mongodb_client.connection_string
- )
- parsed = urlparse(unparsed)
- mongodb_uri = f"mongodb://{parsed.netloc.split(',')[0]}{parsed.path}"
- if parsed.query:
- mongodb_uri += f"?{parsed.query}"
-
- # Create Builder for the PodSpec
- pod_spec_builder = PodSpecV3Builder(
- enable_security_context=config.security_context
- )
-
- # Add secrets to the pod
- mongodb_secret_name = f"{self.app.name}-mongodb-secret"
- pod_spec_builder.add_secret(mongodb_secret_name, {"uri": mongodb_uri})
-
- # Build container
- container_builder = ContainerV3Builder(
- self.app.name,
- image_info,
- config.image_pull_policy,
- run_as_non_root=config.security_context,
- )
- container_builder.add_port(name="exporter", port=PORT)
- container_builder.add_http_readiness_probe(
- path="/api/health",
- port=PORT,
- initial_delay_seconds=10,
- period_seconds=10,
- timeout_seconds=5,
- success_threshold=1,
- failure_threshold=3,
- )
- container_builder.add_http_liveness_probe(
- path="/api/health",
- port=PORT,
- initial_delay_seconds=60,
- timeout_seconds=30,
- failure_threshold=10,
- )
-
- container_builder.add_secret_envs(mongodb_secret_name, {"MONGODB_URI": "uri"})
- container = container_builder.build()
-
- # Add container to PodSpec
- pod_spec_builder.add_container(container)
-
- # Add Pod restart policy
- restart_policy = PodRestartPolicy()
- restart_policy.add_secrets(secret_names=(mongodb_secret_name,))
- pod_spec_builder.set_restart_policy(restart_policy)
-
- # Add ingress resources to PodSpec if site url exists
- if config.site_url:
- parsed = urlparse(config.site_url)
- annotations = {}
- if config.ingress_class:
- annotations["kubernetes.io/ingress.class"] = config.ingress_class
- ingress_resource_builder = IngressResourceV3Builder(
- f"{self.app.name}-ingress", annotations
- )
-
- if config.ingress_whitelist_source_range:
- annotations[
- "nginx.ingress.kubernetes.io/whitelist-source-range"
- ] = config.ingress_whitelist_source_range
-
- if config.cluster_issuer:
- annotations["cert-manager.io/cluster-issuer"] = config.cluster_issuer
-
- if parsed.scheme == "https":
- ingress_resource_builder.add_tls(
- [parsed.hostname], config.tls_secret_name
- )
- else:
- annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
-
- ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT)
- ingress_resource = ingress_resource_builder.build()
- pod_spec_builder.add_ingress_resource(ingress_resource)
-
- return pod_spec_builder.build()
-
-
-if __name__ == "__main__":
- main(MongodbExporterCharm)
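A note on the connection-string handling in build_pod_spec() above: whether the URI comes from the mongodb_uri config option or from the mongodb relation, only the first host of a possibly multi-host connection string is handed to the exporter, with the database path and query string preserved. A standalone sketch of just that step, using an illustrative replica-set URI rather than real deployment data:

    from urllib.parse import urlparse

    unparsed = "mongodb://mongodb-0:27017,mongodb-1:27017/osm?replicaSet=rs0"  # illustrative
    parsed = urlparse(unparsed)

    # Keep only the first host; keep the path and query string.
    mongodb_uri = f"mongodb://{parsed.netloc.split(',')[0]}{parsed.path}"
    if parsed.query:
        mongodb_uri += f"?{parsed.query}"

    print(mongodb_uri)  # -> mongodb://mongodb-0:27017/osm?replicaSet=rs0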
diff --git a/installers/charm/mongodb-exporter/src/pod_spec.py b/installers/charm/mongodb-exporter/src/pod_spec.py
deleted file mode 100644
index ff42e02c..00000000
--- a/installers/charm/mongodb-exporter/src/pod_spec.py
+++ /dev/null
@@ -1,305 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-from ipaddress import ip_network
-import logging
-from typing import Any, Dict, List
-from urllib.parse import urlparse
-
-logger = logging.getLogger(__name__)
-
-
-def _validate_ip_network(network: str) -> bool:
- """Validate IP network.
-
- Args:
- network (str): IP network range.
-
- Returns:
- bool: True if valid, false otherwise.
- """
- if not network:
- return True
-
- try:
- ip_network(network)
- except ValueError:
- return False
-
- return True
-
-
-def _validate_data(config_data: Dict[str, Any], relation_data: Dict[str, Any]) -> bool:
- """Validates passed information.
-
- Args:
- config_data (Dict[str, Any]): configuration information.
- relation_data (Dict[str, Any]): relation information
-
- Raises:
- ValueError: when config and/or relation data is not valid.
- """
- config_validators = {
- "site_url": lambda value, _: isinstance(value, str)
- if value is not None
- else True,
- "cluster_issuer": lambda value, _: isinstance(value, str)
- if value is not None
- else True,
- "ingress_whitelist_source_range": lambda value, _: _validate_ip_network(value),
- "tls_secret_name": lambda value, _: isinstance(value, str)
- if value is not None
- else True,
- }
- relation_validators = {
- "mongodb_connection_string": lambda value, _: (
- isinstance(value, str) and value.startswith("mongodb://")
- )
- }
- problems = []
-
- for key, validator in config_validators.items():
- valid = validator(config_data.get(key), config_data)
-
- if not valid:
- problems.append(key)
-
- for key, validator in relation_validators.items():
- valid = validator(relation_data.get(key), relation_data)
-
- if not valid:
- problems.append(key)
-
- if len(problems) > 0:
- raise ValueError("Errors found in: {}".format(", ".join(problems)))
-
- return True
-
-
-def _make_pod_ports(port: int) -> List[Dict[str, Any]]:
- """Generate pod ports details.
-
- Args:
- port (int): port to expose.
-
- Returns:
- List[Dict[str, Any]]: pod port details.
- """
- return [
- {
- "name": "mongo-exporter",
- "containerPort": port,
- "protocol": "TCP",
- }
- ]
-
-
-def _make_pod_envconfig(
- config: Dict[str, Any], relation_state: Dict[str, Any]
-) -> Dict[str, Any]:
- """Generate pod environment configuration.
-
- Args:
- config (Dict[str, Any]): configuration information.
- relation_state (Dict[str, Any]): relation state information.
-
- Returns:
- Dict[str, Any]: pod environment configuration.
- """
- parsed = urlparse(relation_state.get("mongodb_connection_string"))
-
- envconfig = {
- "MONGODB_URI": f"mongodb://{parsed.netloc.split(',')[0]}{parsed.path}",
- }
-
- if parsed.query:
- envconfig["MONGODB_URI"] += f"?{parsed.query}"
-
- return envconfig
-
-
-def _make_pod_ingress_resources(
- config: Dict[str, Any], app_name: str, port: int
-) -> List[Dict[str, Any]]:
- """Generate pod ingress resources.
-
- Args:
- config (Dict[str, Any]): configuration information.
- app_name (str): application name.
- port (int): port to expose.
-
- Returns:
- List[Dict[str, Any]]: pod ingress resources.
- """
- site_url = config.get("site_url")
-
- if not site_url:
- return
-
- parsed = urlparse(site_url)
-
- if not parsed.scheme.startswith("http"):
- return
-
- ingress_whitelist_source_range = config["ingress_whitelist_source_range"]
- cluster_issuer = config["cluster_issuer"]
-
- annotations = {}
-
- if ingress_whitelist_source_range:
- annotations[
- "nginx.ingress.kubernetes.io/whitelist-source-range"
- ] = ingress_whitelist_source_range
-
- if cluster_issuer:
- annotations["cert-manager.io/cluster-issuer"] = cluster_issuer
-
- ingress_spec_tls = None
-
- if parsed.scheme == "https":
- ingress_spec_tls = [{"hosts": [parsed.hostname]}]
- tls_secret_name = config["tls_secret_name"]
- if tls_secret_name:
- ingress_spec_tls[0]["secretName"] = tls_secret_name
- else:
- annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
-
- ingress = {
- "name": "{}-ingress".format(app_name),
- "annotations": annotations,
- "spec": {
- "rules": [
- {
- "host": parsed.hostname,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ]
- },
- }
- if ingress_spec_tls:
- ingress["spec"]["tls"] = ingress_spec_tls
-
- return [ingress]
-
-
-def _make_readiness_probe(port: int) -> Dict[str, Any]:
- """Generate readiness probe.
-
- Args:
- port (int): service port.
-
- Returns:
- Dict[str, Any]: readiness probe.
- """
- return {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "periodSeconds": 10,
- "timeoutSeconds": 5,
- "successThreshold": 1,
- "failureThreshold": 3,
- }
-
-
-def _make_liveness_probe(port: int) -> Dict[str, Any]:
- """Generate liveness probe.
-
- Args:
- port (int): service port.
-
- Returns:
- Dict[str, Any]: liveness probe.
- """
- return {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 60,
- "timeoutSeconds": 30,
- "failureThreshold": 10,
- }
-
-
-def make_pod_spec(
- image_info: Dict[str, str],
- config: Dict[str, Any],
- relation_state: Dict[str, Any],
- app_name: str = "mongodb-exporter",
- port: int = 9216,
-) -> Dict[str, Any]:
- """Generate the pod spec information.
-
- Args:
- image_info (Dict[str, str]): Object provided by
- OCIImageResource("image").fetch().
- config (Dict[str, Any]): Configuration information.
- relation_state (Dict[str, Any]): Relation state information.
- app_name (str, optional): Application name. Defaults to "ro".
- port (int, optional): Port for the container. Defaults to 9090.
-
- Returns:
- Dict[str, Any]: Pod spec dictionary for the charm.
- """
- if not image_info:
- return None
-
- _validate_data(config, relation_state)
-
- ports = _make_pod_ports(port)
- env_config = _make_pod_envconfig(config, relation_state)
- readiness_probe = _make_readiness_probe(port)
- liveness_probe = _make_liveness_probe(port)
- ingress_resources = _make_pod_ingress_resources(config, app_name, port)
-
- return {
- "version": 3,
- "containers": [
- {
- "name": app_name,
- "imageDetails": image_info,
- "imagePullPolicy": "Always",
- "ports": ports,
- "envConfig": env_config,
- "kubernetes": {
- "readinessProbe": readiness_probe,
- "livenessProbe": liveness_probe,
- },
- }
- ],
- "kubernetesResources": {
- "ingressResources": ingress_resources or [],
- },
- }
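make_pod_spec() above is a pure function of the image details, the charm config and the relation state, which makes it easy to inspect on its own. A minimal sketch, assuming src/ is on the import path (as the unit tests arrange) and using illustrative values for the image details and connection string:

    from pod_spec import make_pod_spec

    image_info = {"imagePath": "bitnami/mongodb-exporter:0.30.0"}  # illustrative image details
    config = {
        "site_url": "http://mongodb-exporter",
        "cluster_issuer": "",
        "ingress_whitelist_source_range": "",
        "tls_secret_name": "",
    }
    relation_state = {"mongodb_connection_string": "mongodb://mongo:27017"}

    spec = make_pod_spec(image_info, config, relation_state)
    print(spec["containers"][0]["envConfig"])                     # {'MONGODB_URI': 'mongodb://mongo:27017'}
    print(len(spec["kubernetesResources"]["ingressResources"]))   # 1 ingress for the http site_url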
diff --git a/installers/charm/mongodb-exporter/templates/mongodb_exporter_dashboard.json b/installers/charm/mongodb-exporter/templates/mongodb_exporter_dashboard.json
deleted file mode 100644
index c6c64c27..00000000
--- a/installers/charm/mongodb-exporter/templates/mongodb_exporter_dashboard.json
+++ /dev/null
@@ -1,938 +0,0 @@
-{
- "annotations": {
- "list": [
- {
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "description": "MongoDB Prometheus Exporter Dashboard.",
- "editable": true,
- "gnetId": 2583,
- "graphTooltip": 1,
- "id": 1,
- "iteration": 1615141074039,
- "links": [],
- "panels": [
- {
- "collapsed": false,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 0
- },
- "id": 22,
- "panels": [],
- "repeat": "env",
- "title": "Health",
- "type": "row"
- },
- {
- "cacheTimeout": null,
- "colorBackground": false,
- "colorValue": true,
- "colors": [
- "rgba(245, 54, 54, 0.9)",
- "rgba(237, 129, 40, 0.89)",
- "rgba(50, 172, 45, 0.97)"
- ],
- "datasource": "prometheus - Juju generated source",
- "decimals": null,
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
- "format": "s",
- "gauge": {
- "maxValue": 100,
- "minValue": 0,
- "show": false,
- "thresholdLabels": false,
- "thresholdMarkers": true
- },
- "gridPos": {
- "h": 4,
- "w": 12,
- "x": 0,
- "y": 1
- },
- "id": 10,
- "interval": null,
- "links": [],
- "mappingType": 1,
- "mappingTypes": [
- {
- "name": "value to text",
- "value": 1
- },
- {
- "name": "range to text",
- "value": 2
- }
- ],
- "maxDataPoints": 100,
- "nullPointMode": "connected",
- "nullText": null,
- "postfix": "",
- "postfixFontSize": "50%",
- "prefix": "",
- "prefixFontSize": "50%",
- "rangeMaps": [
- {
- "from": "null",
- "text": "N/A",
- "to": "null"
- }
- ],
- "sparkline": {
- "fillColor": "rgba(31, 118, 189, 0.18)",
- "full": false,
- "lineColor": "rgb(31, 120, 193)",
- "show": false
- },
- "tableColumn": "",
- "targets": [
- {
- "expr": "mongodb_ss_uptime{}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "",
- "refId": "A",
- "step": 1800
- }
- ],
- "thresholds": "0,360",
- "title": "Uptime",
- "type": "singlestat",
- "valueFontSize": "80%",
- "valueMaps": [
- {
- "op": "=",
- "text": "N/A",
- "value": "null"
- }
- ],
- "valueName": "current"
- },
- {
- "cacheTimeout": null,
- "colorBackground": false,
- "colorValue": false,
- "colors": [
- "rgba(245, 54, 54, 0.9)",
- "rgba(237, 129, 40, 0.89)",
- "rgba(50, 172, 45, 0.97)"
- ],
- "datasource": "prometheus - Juju generated source",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
- "format": "none",
- "gauge": {
- "maxValue": 100,
- "minValue": 0,
- "show": false,
- "thresholdLabels": false,
- "thresholdMarkers": true
- },
- "gridPos": {
- "h": 4,
- "w": 12,
- "x": 12,
- "y": 1
- },
- "id": 1,
- "interval": null,
- "links": [],
- "mappingType": 1,
- "mappingTypes": [
- {
- "name": "value to text",
- "value": 1
- },
- {
- "name": "range to text",
- "value": 2
- }
- ],
- "maxDataPoints": 100,
- "nullPointMode": "connected",
- "nullText": null,
- "postfix": "",
- "postfixFontSize": "50%",
- "prefix": "",
- "prefixFontSize": "50%",
- "rangeMaps": [
- {
- "from": "null",
- "text": "N/A",
- "to": "null"
- }
- ],
- "sparkline": {
- "fillColor": "rgba(31, 118, 189, 0.18)",
- "full": true,
- "lineColor": "rgb(31, 120, 193)",
- "show": true
- },
- "tableColumn": "",
- "targets": [
- {
- "expr": "mongodb_ss_connections{conn_type=\"current\"}",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "",
- "metric": "mongodb_connections",
- "refId": "A",
- "step": 1800
- }
- ],
- "thresholds": "",
- "title": "Open Connections",
- "type": "singlestat",
- "valueFontSize": "80%",
- "valueMaps": [
- {
- "op": "=",
- "text": "N/A",
- "value": "null"
- }
- ],
- "valueName": "avg"
- },
- {
- "collapsed": false,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 5
- },
- "id": 20,
- "panels": [],
- "repeat": "env",
- "title": "Operations",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 6,
- "w": 10,
- "x": 0,
- "y": 6
- },
- "hiddenSeries": false,
- "id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.4.3",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(mongodb_ss_opcounters[$interval])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{legacy_op_type}}",
- "refId": "A",
- "step": 240
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Query Operations",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:670",
- "format": "ops",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "$$hashKey": "object:671",
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 6,
- "w": 8,
- "x": 10,
- "y": 6
- },
- "hiddenSeries": false,
- "id": 9,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.4.3",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "returned",
- "yaxis": 1
- }
- ],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(mongodb_ss_metrics_document[$interval])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{doc_op_type}}",
- "refId": "A",
- "step": 240
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Document Operations",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:699",
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "$$hashKey": "object:700",
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 6,
- "w": 6,
- "x": 18,
- "y": 6
- },
- "hiddenSeries": false,
- "id": 8,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.4.3",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(mongodb_ss_opcounters[$interval])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "{{legacy_op_type}}",
- "refId": "A",
- "step": 600
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Document Query Executor",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:728",
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "$$hashKey": "object:729",
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "collapsed": false,
- "datasource": null,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 12
- },
- "id": 23,
- "panels": [],
- "repeat": null,
- "title": "Resources",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 0,
- "y": 13
- },
- "hiddenSeries": false,
- "id": 4,
- "legend": {
- "alignAsTable": false,
- "avg": false,
- "current": true,
- "hideEmpty": false,
- "hideZero": false,
- "max": false,
- "min": false,
- "rightSide": false,
- "show": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.4.3",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "mongodb_ss_mem_resident",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "Resident",
- "refId": "A",
- "step": 240
- },
- {
- "expr": "mongodb_ss_mem_virtual",
- "hide": false,
- "interval": "",
- "legendFormat": "Virtual",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Memory",
- "tooltip": {
- "shared": false,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": [
- "total"
- ]
- },
- "yaxes": [
- {
- "$$hashKey": "object:523",
- "format": "decmbytes",
- "label": "",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "$$hashKey": "object:524",
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fieldConfig": {
- "defaults": {
- "custom": {},
- "links": []
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 6,
- "w": 12,
- "x": 12,
- "y": 13
- },
- "hiddenSeries": false,
- "id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.4.3",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "rate(mongodb_ss_network_bytesOut[$interval])",
- "format": "time_series",
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "Out",
- "metric": "mongodb_metrics_operation_total",
- "refId": "A",
- "step": 240
- },
- {
- "expr": "rate(mongodb_ss_network_bytesIn[$interval])",
- "hide": false,
- "interval": "",
- "intervalFactor": 2,
- "legendFormat": "In",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Network I/O",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "$$hashKey": "object:579",
- "format": "decbytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "$$hashKey": "object:580",
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "refresh": "5s",
- "schemaVersion": 27,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": [
- {
- "allValue": null,
- "current": {
- "selected": true,
- "text": [
- "All"
- ],
- "value": [
- "$__all"
- ]
- },
- "datasource": "prometheus - Juju generated source",
- "definition": "",
- "description": null,
- "error": null,
- "hide": 0,
- "includeAll": true,
- "label": "instance",
- "multi": true,
- "name": "instance",
- "options": [],
- "query": {
- "query": "label_values(mongodb_connections, instance)",
- "refId": "prometheus - Juju generated source-instance-Variable-Query"
- },
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "sort": 1,
- "tagValuesQuery": "/.*-(.*?)-.*/",
- "tags": [],
- "tagsQuery": "label_values(mongodb_connections, instance)",
- "type": "query",
- "useTags": false
- },
- {
- "auto": true,
- "auto_count": 30,
- "auto_min": "10s",
- "current": {
- "selected": false,
- "text": "auto",
- "value": "$__auto_interval_interval"
- },
- "description": null,
- "error": null,
- "hide": 0,
- "label": null,
- "name": "interval",
- "options": [
- {
- "selected": true,
- "text": "auto",
- "value": "$__auto_interval_interval"
- },
- {
- "selected": false,
- "text": "1m",
- "value": "1m"
- },
- {
- "selected": false,
- "text": "10m",
- "value": "10m"
- },
- {
- "selected": false,
- "text": "30m",
- "value": "30m"
- },
- {
- "selected": false,
- "text": "1h",
- "value": "1h"
- },
- {
- "selected": false,
- "text": "6h",
- "value": "6h"
- },
- {
- "selected": false,
- "text": "12h",
- "value": "12h"
- },
- {
- "selected": false,
- "text": "1d",
- "value": "1d"
- },
- {
- "selected": false,
- "text": "7d",
- "value": "7d"
- },
- {
- "selected": false,
- "text": "14d",
- "value": "14d"
- },
- {
- "selected": false,
- "text": "30d",
- "value": "30d"
- }
- ],
- "query": "1m,10m,30m,1h,6h,12h,1d,7d,14d,30d",
- "refresh": 2,
- "skipUrlSync": false,
- "type": "interval"
- }
- ]
- },
- "time": {
- "from": "now/d",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "browser",
- "title": "MongoDB",
- "uid": "HEK4NbtZk",
- "version": 17
-}
\ No newline at end of file
diff --git a/installers/charm/mongodb-exporter/tests/__init__.py b/installers/charm/mongodb-exporter/tests/__init__.py
deleted file mode 100644
index 90dc417c..00000000
--- a/installers/charm/mongodb-exporter/tests/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-"""Init mocking for unit tests."""
-
-import sys
-
-import mock
-
-
-class OCIImageResourceErrorMock(Exception):
- pass
-
-
-sys.path.append("src")
-
-oci_image = mock.MagicMock()
-oci_image.OCIImageResourceError = OCIImageResourceErrorMock
-sys.modules["oci_image"] = oci_image
-sys.modules["oci_image"].OCIImageResource().fetch.return_value = {}
diff --git a/installers/charm/mongodb-exporter/tests/test_charm.py b/installers/charm/mongodb-exporter/tests/test_charm.py
deleted file mode 100644
index 1675f5f5..00000000
--- a/installers/charm/mongodb-exporter/tests/test_charm.py
+++ /dev/null
@@ -1,583 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-import sys
-from typing import NoReturn
-import unittest
-
-from charm import MongodbExporterCharm
-from ops.model import ActiveStatus, BlockedStatus
-from ops.testing import Harness
-
-
-class TestCharm(unittest.TestCase):
- """Mongodb Exporter Charm unit tests."""
-
- def setUp(self) -> NoReturn:
- """Test setup"""
- self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
- self.harness = Harness(MongodbExporterCharm)
- self.harness.set_leader(is_leader=True)
- self.harness.begin()
- self.config = {
- "ingress_whitelist_source_range": "",
- "tls_secret_name": "",
- "site_url": "https://mongodb-exporter.192.168.100.100.nip.io",
- "cluster_issuer": "vault-issuer",
- }
- self.harness.update_config(self.config)
-
- def test_config_changed_no_relations(
- self,
- ) -> NoReturn:
- """Test ingress resources without HTTP."""
-
- self.harness.charm.on.config_changed.emit()
-
- # Assertions
- self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
- print(self.harness.charm.unit.status.message)
- self.assertTrue(
- all(
- relation in self.harness.charm.unit.status.message
- for relation in ["mongodb"]
- )
- )
-
- def test_config_changed_non_leader(
- self,
- ) -> NoReturn:
- """Test ingress resources without HTTP."""
- self.harness.set_leader(is_leader=False)
- self.harness.charm.on.config_changed.emit()
-
- # Assertions
- self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
-
- def test_with_relations(
- self,
- ) -> NoReturn:
- "Test with relations"
- self.initialize_mongo_relation()
-
- # Verifying status
- self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
- def test_with_config(
- self,
- ) -> NoReturn:
- "Test with config"
-        self.initialize_mongo_config()
-
- # Verifying status
- self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
- def test_mongodb_exception_relation_and_config(
- self,
- ) -> NoReturn:
- self.initialize_mongo_config()
- self.initialize_mongo_relation()
-
- # Verifying status
- self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
- def initialize_mongo_relation(self):
- mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
- self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
- self.harness.update_relation_data(
- mongodb_relation_id,
- "mongodb/0",
- {"connection_string": "mongodb://mongo:27017"},
- )
-
- def initialize_mongo_config(self):
- self.harness.update_config({"mongodb_uri": "mongodb://mongo:27017"})
-
-
-if __name__ == "__main__":
- unittest.main()
-
-
-# class TestCharm(unittest.TestCase):
-# """Mongodb Exporter Charm unit tests."""
-#
-# def setUp(self) -> NoReturn:
-# """Test setup"""
-# self.harness = Harness(MongodbExporterCharm)
-# self.harness.set_leader(is_leader=True)
-# self.harness.begin()
-#
-# def test_on_start_without_relations(self) -> NoReturn:
-# """Test installation without any relation."""
-# self.harness.charm.on.start.emit()
-#
-# # Verifying status
-# self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-#
-# # Verifying status message
-# self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-# self.assertTrue(
-# self.harness.charm.unit.status.message.startswith("Waiting for ")
-# )
-# self.assertIn("mongodb", self.harness.charm.unit.status.message)
-# self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
-#
-# def test_on_start_with_relations_without_http(self) -> NoReturn:
-# """Test deployment."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "mongodb-exporter",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "mongo-exporter",
-# "containerPort": 9216,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {
-# "MONGODB_URI": "mongodb://mongo",
-# },
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9216,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9216,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {"ingressResources": []},
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# # Initializing the mongodb relation
-# relation_id = self.harness.add_relation("mongodb", "mongodb")
-# self.harness.add_relation_unit(relation_id, "mongodb/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "mongodb/0",
-# {
-# "connection_string": "mongodb://mongo",
-# },
-# )
-#
-# # Verifying status
-# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-#
-# pod_spec, _ = self.harness.get_pod_spec()
-#
-# self.assertDictEqual(expected_result, pod_spec)
-#
-# def test_ingress_resources_with_http(self) -> NoReturn:
-# """Test ingress resources with HTTP."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "mongodb-exporter",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "mongo-exporter",
-# "containerPort": 9216,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {
-# "MONGODB_URI": "mongodb://mongo",
-# },
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9216,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9216,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {
-# "ingressResources": [
-# {
-# "name": "mongodb-exporter-ingress",
-# "annotations": {
-# "nginx.ingress.kubernetes.io/ssl-redirect": "false",
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": "mongodb-exporter",
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": "mongodb-exporter",
-# "servicePort": 9216,
-# },
-# }
-# ]
-# },
-# }
-# ]
-# },
-# }
-# ],
-# },
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# # Initializing the mongodb relation
-# relation_id = self.harness.add_relation("mongodb", "mongodb")
-# self.harness.add_relation_unit(relation_id, "mongodb/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "mongodb/0",
-# {
-# "connection_string": "mongodb://mongo",
-# },
-# )
-#
-# self.harness.update_config({"site_url": "http://mongodb-exporter"})
-#
-# pod_spec, _ = self.harness.get_pod_spec()
-#
-# self.assertDictEqual(expected_result, pod_spec)
-#
-# def test_ingress_resources_with_https(self) -> NoReturn:
-# """Test ingress resources with HTTPS."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "mongodb-exporter",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "mongo-exporter",
-# "containerPort": 9216,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {
-# "MONGODB_URI": "mongodb://mongo",
-# },
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9216,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9216,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {
-# "ingressResources": [
-# {
-# "name": "mongodb-exporter-ingress",
-# "annotations": {},
-# "spec": {
-# "rules": [
-# {
-# "host": "mongodb-exporter",
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": "mongodb-exporter",
-# "servicePort": 9216,
-# },
-# }
-# ]
-# },
-# }
-# ],
-# "tls": [
-# {
-# "hosts": ["mongodb-exporter"],
-# "secretName": "mongodb-exporter",
-# }
-# ],
-# },
-# }
-# ],
-# },
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# # Initializing the mongodb relation
-# relation_id = self.harness.add_relation("mongodb", "mongodb")
-# self.harness.add_relation_unit(relation_id, "mongodb/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "mongodb/0",
-# {
-# "connection_string": "mongodb://mongo",
-# },
-# )
-#
-# self.harness.update_config(
-# {
-# "site_url": "https://mongodb-exporter",
-# "tls_secret_name": "mongodb-exporter",
-# }
-# )
-#
-# pod_spec, _ = self.harness.get_pod_spec()
-#
-# self.assertDictEqual(expected_result, pod_spec)
-#
-# def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn:
-# """Test ingress resources with HTTPS and ingress whitelist."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "mongodb-exporter",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "mongo-exporter",
-# "containerPort": 9216,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {
-# "MONGODB_URI": "mongodb://mongo",
-# },
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9216,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9216,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {
-# "ingressResources": [
-# {
-# "name": "mongodb-exporter-ingress",
-# "annotations": {
-# "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0",
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": "mongodb-exporter",
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": "mongodb-exporter",
-# "servicePort": 9216,
-# },
-# }
-# ]
-# },
-# }
-# ],
-# "tls": [
-# {
-# "hosts": ["mongodb-exporter"],
-# "secretName": "mongodb-exporter",
-# }
-# ],
-# },
-# }
-# ],
-# },
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# # Initializing the mongodb relation
-# relation_id = self.harness.add_relation("mongodb", "mongodb")
-# self.harness.add_relation_unit(relation_id, "mongodb/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "mongodb/0",
-# {
-# "connection_string": "mongodb://mongo",
-# },
-# )
-#
-# self.harness.update_config(
-# {
-# "site_url": "https://mongodb-exporter",
-# "tls_secret_name": "mongodb-exporter",
-# "ingress_whitelist_source_range": "0.0.0.0/0",
-# }
-# )
-#
-# pod_spec, _ = self.harness.get_pod_spec()
-#
-# self.assertDictEqual(expected_result, pod_spec)
-#
-# def test_on_mongodb_unit_relation_changed(self) -> NoReturn:
-# """Test to see if mongodb relation is updated."""
-# self.harness.charm.on.start.emit()
-#
-# # Initializing the mongodb relation
-# relation_id = self.harness.add_relation("mongodb", "mongodb")
-# self.harness.add_relation_unit(relation_id, "mongodb/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "mongodb/0",
-# {
-# "connection_string": "mongodb://mongo",
-# },
-# )
-#
-# # Verifying status
-# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-#
-# def test_publish_scrape_info(self) -> NoReturn:
-# """Test to see if scrape relation is updated."""
-# expected_result = {
-# "hostname": "mongodb-exporter",
-# "port": "9216",
-# "metrics_path": "/metrics",
-# "scrape_interval": "30s",
-# "scrape_timeout": "15s",
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# relation_id = self.harness.add_relation("prometheus-scrape", "prometheus")
-# self.harness.add_relation_unit(relation_id, "prometheus/0")
-# relation_data = self.harness.get_relation_data(
-# relation_id, "mongodb-exporter/0"
-# )
-#
-# self.assertDictEqual(expected_result, relation_data)
-#
-# def test_publish_scrape_info_with_site_url(self) -> NoReturn:
-# """Test to see if target relation is updated."""
-# expected_result = {
-# "hostname": "mongodb-exporter-osm",
-# "port": "80",
-# "metrics_path": "/metrics",
-# "scrape_interval": "30s",
-# "scrape_timeout": "15s",
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# self.harness.update_config({"site_url": "http://mongodb-exporter-osm"})
-#
-# relation_id = self.harness.add_relation("prometheus-scrape", "prometheus")
-# self.harness.add_relation_unit(relation_id, "prometheus/0")
-# relation_data = self.harness.get_relation_data(
-# relation_id, "mongodb-exporter/0"
-# )
-#
-# self.assertDictEqual(expected_result, relation_data)
-#
-# def test_publish_dashboard_info(self) -> NoReturn:
-# """Test to see if dashboard relation is updated."""
-# self.harness.charm.on.start.emit()
-#
-# relation_id = self.harness.add_relation("grafana-dashboard", "grafana")
-# self.harness.add_relation_unit(relation_id, "grafana/0")
-# relation_data = self.harness.get_relation_data(
-# relation_id, "mongodb-exporter/0"
-# )
-#
-# self.assertEqual("osm-mongodb", relation_data["name"])
-# self.assertTrue("dashboard" in relation_data)
-# self.assertTrue(len(relation_data["dashboard"]) > 0)
-#
-#
-# if __name__ == "__main__":
-# unittest.main()
diff --git a/installers/charm/mongodb-exporter/tests/test_pod_spec.py b/installers/charm/mongodb-exporter/tests/test_pod_spec.py
deleted file mode 100644
index 94ab6fb5..00000000
--- a/installers/charm/mongodb-exporter/tests/test_pod_spec.py
+++ /dev/null
@@ -1,489 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-from typing import NoReturn
-import unittest
-
-import pod_spec
-
-
-class TestPodSpec(unittest.TestCase):
- """Pod spec unit tests."""
-
- def test_make_pod_ports(self) -> NoReturn:
- """Testing make pod ports."""
- port = 9216
-
- expected_result = [
- {
- "name": "mongo-exporter",
- "containerPort": port,
- "protocol": "TCP",
- }
- ]
-
- pod_ports = pod_spec._make_pod_ports(port)
-
- self.assertListEqual(expected_result, pod_ports)
-
- def test_make_pod_envconfig(self) -> NoReturn:
- """Teting make pod envconfig."""
- config = {}
- relation_state = {
- "mongodb_connection_string": "mongodb://mongo",
- }
-
- expected_result = {"MONGODB_URI": "mongodb://mongo"}
-
- pod_envconfig = pod_spec._make_pod_envconfig(config, relation_state)
-
- self.assertDictEqual(expected_result, pod_envconfig)
-
- def test_make_pod_ingress_resources_without_site_url(self) -> NoReturn:
- """Testing make pod ingress resources without site_url."""
- config = {
- "site_url": "",
- "cluster_issuer": "",
- }
- app_name = "mongodb-exporter"
- port = 9216
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertIsNone(pod_ingress_resources)
-
- def test_make_pod_ingress_resources(self) -> NoReturn:
- """Testing make pod ingress resources."""
- config = {
- "site_url": "http://mongodb-exporter",
- "cluster_issuer": "",
- "ingress_whitelist_source_range": "",
- }
- app_name = "mongodb-exporter"
- port = 9216
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {
- "nginx.ingress.kubernetes.io/ssl-redirect": "false",
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ]
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_pod_ingress_resources_with_whitelist_source_range(self) -> NoReturn:
- """Testing make pod ingress resources with whitelist_source_range."""
- config = {
- "site_url": "http://mongodb-exporter",
- "cluster_issuer": "",
- "ingress_whitelist_source_range": "0.0.0.0/0",
- }
- app_name = "mongodb-exporter"
- port = 9216
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {
- "nginx.ingress.kubernetes.io/ssl-redirect": "false",
- "nginx.ingress.kubernetes.io/whitelist-source-range": config[
- "ingress_whitelist_source_range"
- ],
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ]
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_pod_ingress_resources_with_https(self) -> NoReturn:
- """Testing make pod ingress resources with HTTPs."""
- config = {
- "site_url": "https://mongodb-exporter",
- "cluster_issuer": "",
- "ingress_whitelist_source_range": "",
- "tls_secret_name": "",
- }
- app_name = "mongodb-exporter"
- port = 9216
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {},
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ],
- "tls": [{"hosts": [app_name]}],
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_pod_ingress_resources_with_https_tls_secret_name(self) -> NoReturn:
- """Testing make pod ingress resources with HTTPs and TLS secret name."""
- config = {
- "site_url": "https://mongodb-exporter",
- "cluster_issuer": "",
- "ingress_whitelist_source_range": "",
- "tls_secret_name": "secret_name",
- }
- app_name = "mongodb-exporter"
- port = 9216
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {},
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ],
- "tls": [
- {"hosts": [app_name], "secretName": config["tls_secret_name"]}
- ],
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_readiness_probe(self) -> NoReturn:
- """Testing make readiness probe."""
- port = 9216
-
- expected_result = {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "periodSeconds": 10,
- "timeoutSeconds": 5,
- "successThreshold": 1,
- "failureThreshold": 3,
- }
-
- readiness_probe = pod_spec._make_readiness_probe(port)
-
- self.assertDictEqual(expected_result, readiness_probe)
-
- def test_make_liveness_probe(self) -> NoReturn:
- """Testing make liveness probe."""
- port = 9216
-
- expected_result = {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 60,
- "timeoutSeconds": 30,
- "failureThreshold": 10,
- }
-
- liveness_probe = pod_spec._make_liveness_probe(port)
-
- self.assertDictEqual(expected_result, liveness_probe)
-
- def test_make_pod_spec(self) -> NoReturn:
- """Testing make pod spec."""
- image_info = {"upstream-source": "bitnami/mongodb-exporter:latest"}
- config = {
- "site_url": "",
- "cluster_issuer": "",
- }
- relation_state = {
- "mongodb_connection_string": "mongodb://mongo",
- }
- app_name = "mongodb-exporter"
- port = 9216
-
- expected_result = {
- "version": 3,
- "containers": [
- {
- "name": app_name,
- "imageDetails": image_info,
- "imagePullPolicy": "Always",
- "ports": [
- {
- "name": "mongo-exporter",
- "containerPort": port,
- "protocol": "TCP",
- }
- ],
- "envConfig": {
- "MONGODB_URI": "mongodb://mongo",
- },
- "kubernetes": {
- "readinessProbe": {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "periodSeconds": 10,
- "timeoutSeconds": 5,
- "successThreshold": 1,
- "failureThreshold": 3,
- },
- "livenessProbe": {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 60,
- "timeoutSeconds": 30,
- "failureThreshold": 10,
- },
- },
- }
- ],
- "kubernetesResources": {"ingressResources": []},
- }
-
- spec = pod_spec.make_pod_spec(
- image_info, config, relation_state, app_name, port
- )
-
- self.assertDictEqual(expected_result, spec)
-
- def test_make_pod_spec_with_ingress(self) -> NoReturn:
- """Testing make pod spec."""
- image_info = {"upstream-source": "bitnami/mongodb-exporter:latest"}
- config = {
- "site_url": "https://mongodb-exporter",
- "cluster_issuer": "",
- "tls_secret_name": "mongodb-exporter",
- "ingress_whitelist_source_range": "0.0.0.0/0",
- }
- relation_state = {
- "mongodb_connection_string": "mongodb://mongo",
- }
- app_name = "mongodb-exporter"
- port = 9216
-
- expected_result = {
- "version": 3,
- "containers": [
- {
- "name": app_name,
- "imageDetails": image_info,
- "imagePullPolicy": "Always",
- "ports": [
- {
- "name": "mongo-exporter",
- "containerPort": port,
- "protocol": "TCP",
- }
- ],
- "envConfig": {
- "MONGODB_URI": "mongodb://mongo",
- },
- "kubernetes": {
- "readinessProbe": {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "periodSeconds": 10,
- "timeoutSeconds": 5,
- "successThreshold": 1,
- "failureThreshold": 3,
- },
- "livenessProbe": {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 60,
- "timeoutSeconds": 30,
- "failureThreshold": 10,
- },
- },
- }
- ],
- "kubernetesResources": {
- "ingressResources": [
- {
- "name": "{}-ingress".format(app_name),
- "annotations": {
- "nginx.ingress.kubernetes.io/whitelist-source-range": config.get(
- "ingress_whitelist_source_range"
- ),
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ],
- "tls": [
- {
- "hosts": [app_name],
- "secretName": config.get("tls_secret_name"),
- }
- ],
- },
- }
- ],
- },
- }
-
- spec = pod_spec.make_pod_spec(
- image_info, config, relation_state, app_name, port
- )
-
- self.assertDictEqual(expected_result, spec)
-
- def test_make_pod_spec_without_image_info(self) -> NoReturn:
- """Testing make pod spec without image_info."""
- image_info = None
- config = {
- "site_url": "",
- "cluster_issuer": "",
- }
- relation_state = {
- "mongodb_connection_string": "mongodb://mongo",
- }
- app_name = "mongodb-exporter"
- port = 9216
-
- spec = pod_spec.make_pod_spec(
- image_info, config, relation_state, app_name, port
- )
-
- self.assertIsNone(spec)
-
- def test_make_pod_spec_without_relation_state(self) -> NoReturn:
- """Testing make pod spec without relation_state."""
- image_info = {"upstream-source": "bitnami/mongodb-exporter:latest"}
- config = {
- "site_url": "",
- "cluster_issuer": "",
- }
- relation_state = {}
- app_name = "mongodb-exporter"
- port = 9216
-
- with self.assertRaises(ValueError):
- pod_spec.make_pod_spec(image_info, config, relation_state, app_name, port)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/installers/charm/mongodb-exporter/tox.ini b/installers/charm/mongodb-exporter/tox.ini
deleted file mode 100644
index 4c7970df..00000000
--- a/installers/charm/mongodb-exporter/tox.ini
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-#######################################################################################
-
-[tox]
-envlist = black, cover, flake8, pylint, yamllint, safety
-skipsdist = true
-
-[tox:jenkins]
-toxworkdir = /tmp/.tox
-
-[testenv]
-basepython = python3.8
-setenv = VIRTUAL_ENV={envdir}
- PYTHONDONTWRITEBYTECODE = 1
-deps = -r{toxinidir}/requirements.txt
-
-
-#######################################################################################
-[testenv:black]
-deps = black
-commands =
- black --check --diff src/ tests/
-
-
-#######################################################################################
-[testenv:cover]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- coverage
- nose2
-commands =
- sh -c 'rm -f nosetests.xml'
- coverage erase
- nose2 -C --coverage src
- coverage report --omit='*tests*'
- coverage html -d ./cover --omit='*tests*'
- coverage xml -o coverage.xml --omit=*tests*
-whitelist_externals = sh
-
-
-#######################################################################################
-[testenv:flake8]
-deps = flake8
- flake8-import-order
-commands =
- flake8 src/ tests/
-
-
-#######################################################################################
-[testenv:pylint]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- pylint==2.10.2
-commands =
- pylint -E src/ tests/
-
-
-#######################################################################################
-[testenv:safety]
-setenv =
- LC_ALL=C.UTF-8
- LANG=C.UTF-8
-deps = {[testenv]deps}
- safety
-commands =
- - safety check --full-report
-
-
-#######################################################################################
-[testenv:yamllint]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- yamllint
-commands = yamllint .
-
-#######################################################################################
-[testenv:build]
-passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
-whitelist_externals =
- charmcraft
- sh
-commands =
- charmcraft pack
- sh -c 'ubuntu_version=20.04; \
- architectures="amd64-aarch64-arm64"; \
- charm_name=`cat metadata.yaml | grep -E "^name: " | cut -f 2 -d " "`; \
- mv $charm_name"_ubuntu-"$ubuntu_version-$architectures.charm $charm_name.charm'
-
-#######################################################################################
-[flake8]
-ignore =
- W291,
- W293,
- W503,
- E123,
- E125,
- E226,
- E241,
-exclude =
- .git,
- __pycache__,
- .tox,
-max-line-length = 120
-show-source = True
-builtins = _
-max-complexity = 10
-import-order-style = google
diff --git a/installers/charm/mysqld-exporter/.gitignore b/installers/charm/mysqld-exporter/.gitignore
deleted file mode 100644
index 2885df27..00000000
--- a/installers/charm/mysqld-exporter/.gitignore
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-venv
-.vscode
-build
-*.charm
-.coverage
-coverage.xml
-.stestr
-cover
-release
\ No newline at end of file
diff --git a/installers/charm/mysqld-exporter/.jujuignore b/installers/charm/mysqld-exporter/.jujuignore
deleted file mode 100644
index 3ae3e7dc..00000000
--- a/installers/charm/mysqld-exporter/.jujuignore
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-venv
-.vscode
-build
-*.charm
-.coverage
-coverage.xml
-.gitignore
-.stestr
-cover
-release
-tests/
-requirements*
-tox.ini
diff --git a/installers/charm/mysqld-exporter/.yamllint.yaml b/installers/charm/mysqld-exporter/.yamllint.yaml
deleted file mode 100644
index d71fb69f..00000000
--- a/installers/charm/mysqld-exporter/.yamllint.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
----
-extends: default
-
-yaml-files:
- - "*.yaml"
- - "*.yml"
- - ".yamllint"
-ignore: |
- .tox
- cover/
- build/
- venv
- release/
diff --git a/installers/charm/mysqld-exporter/README.md b/installers/charm/mysqld-exporter/README.md
deleted file mode 100644
index 481d53c1..00000000
--- a/installers/charm/mysqld-exporter/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-# Prometheus Mysql Exporter operator Charm for Kubernetes
-
-## Requirements
diff --git a/installers/charm/mysqld-exporter/charmcraft.yaml b/installers/charm/mysqld-exporter/charmcraft.yaml
deleted file mode 100644
index 0a285a9d..00000000
--- a/installers/charm/mysqld-exporter/charmcraft.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-type: charm
-bases:
- - build-on:
- - name: ubuntu
- channel: "20.04"
- architectures: ["amd64"]
- run-on:
- - name: ubuntu
- channel: "20.04"
- architectures:
- - amd64
- - aarch64
- - arm64
-parts:
- charm:
- build-packages: [git]
diff --git a/installers/charm/mysqld-exporter/config.yaml b/installers/charm/mysqld-exporter/config.yaml
deleted file mode 100644
index 5c0a24ba..00000000
--- a/installers/charm/mysqld-exporter/config.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-options:
- ingress_class:
- type: string
- description: |
- Ingress class name. This is useful for selecting the ingress to be used
- in case there are multiple ingresses in the underlying k8s clusters.
- ingress_whitelist_source_range:
- type: string
- description: |
- A comma-separated list of CIDRs to store in the
- ingress.kubernetes.io/whitelist-source-range annotation.
-
- This can be used to lock down access to
- Keystone based on source IP address.
- default: ""
- tls_secret_name:
- type: string
- description: TLS Secret name
- default: ""
- site_url:
- type: string
- description: Ingress URL
- default: ""
- cluster_issuer:
- type: string
- description: Name of the cluster issuer for TLS certificates
- default: ""
- mysql_uri:
- type: string
- description: MySQL URI (external database)
- image_pull_policy:
- type: string
- description: |
- ImagePullPolicy configuration for the pod.
- Possible values: always, ifnotpresent, never
- default: always
- security_context:
- description: Enables the security context of the pods
- type: boolean
- default: false
diff --git a/installers/charm/mysqld-exporter/metadata.yaml b/installers/charm/mysqld-exporter/metadata.yaml
deleted file mode 100644
index 7f6fb6ea..00000000
--- a/installers/charm/mysqld-exporter/metadata.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-name: mysqld-exporter-k8s
-summary: OSM Prometheus Mysql Exporter
-description: |
- A CAAS charm to deploy OSM's Prometheus Mysql Exporter.
-series:
- - kubernetes
-tags:
- - kubernetes
- - osm
- - prometheus
- - mysql-exporter
-min-juju-version: 2.8.0
-deployment:
- type: stateless
- service: cluster
-resources:
- image:
- type: oci-image
- description: Image of mysqld-exporter
- upstream-source: "bitnami/mysqld-exporter:0.14.0"
-provides:
- prometheus-scrape:
- interface: prometheus
- grafana-dashboard:
- interface: grafana-dashboard
-requires:
- mysql:
- interface: mysql
diff --git a/installers/charm/mysqld-exporter/requirements-test.txt b/installers/charm/mysqld-exporter/requirements-test.txt
deleted file mode 100644
index 316f6d20..00000000
--- a/installers/charm/mysqld-exporter/requirements-test.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-mock==4.0.3
diff --git a/installers/charm/mysqld-exporter/requirements.txt b/installers/charm/mysqld-exporter/requirements.txt
deleted file mode 100644
index 8bb93ad3..00000000
--- a/installers/charm/mysqld-exporter/requirements.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-git+https://github.com/charmed-osm/ops-lib-charmed-osm/@master
diff --git a/installers/charm/mysqld-exporter/src/charm.py b/installers/charm/mysqld-exporter/src/charm.py
deleted file mode 100755
index 153dbfd9..00000000
--- a/installers/charm/mysqld-exporter/src/charm.py
+++ /dev/null
@@ -1,276 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-# pylint: disable=E0213
-
-from ipaddress import ip_network
-import logging
-from pathlib import Path
-from typing import NoReturn, Optional
-from urllib.parse import urlparse
-
-from ops.main import main
-from opslib.osm.charm import CharmedOsmBase, RelationsMissing
-from opslib.osm.interfaces.grafana import GrafanaDashboardTarget
-from opslib.osm.interfaces.mysql import MysqlClient
-from opslib.osm.interfaces.prometheus import PrometheusScrapeTarget
-from opslib.osm.pod import (
- ContainerV3Builder,
- IngressResourceV3Builder,
- PodRestartPolicy,
- PodSpecV3Builder,
-)
-from opslib.osm.validator import ModelValidator, validator
-
-
-logger = logging.getLogger(__name__)
-
-PORT = 9104
-
-
-class ConfigModel(ModelValidator):
- site_url: Optional[str]
- cluster_issuer: Optional[str]
- ingress_class: Optional[str]
- ingress_whitelist_source_range: Optional[str]
- tls_secret_name: Optional[str]
- mysql_uri: Optional[str]
- image_pull_policy: str
- security_context: bool
-
- @validator("site_url")
- def validate_site_url(cls, v):
- if v:
- parsed = urlparse(v)
- if not parsed.scheme.startswith("http"):
- raise ValueError("value must start with http")
- return v
-
- @validator("ingress_whitelist_source_range")
- def validate_ingress_whitelist_source_range(cls, v):
- if v:
- ip_network(v)
- return v
-
- @validator("mysql_uri")
- def validate_mysql_uri(cls, v):
- if v and not v.startswith("mysql://"):
- raise ValueError("mysql_uri is not properly formed")
- return v
-
- @validator("image_pull_policy")
- def validate_image_pull_policy(cls, v):
- values = {
- "always": "Always",
- "ifnotpresent": "IfNotPresent",
- "never": "Never",
- }
- v = v.lower()
- if v not in values.keys():
- raise ValueError("value must be always, ifnotpresent or never")
- return values[v]
-
-
-class MysqlExporterCharm(CharmedOsmBase):
- def __init__(self, *args) -> NoReturn:
- super().__init__(*args, oci_image="image")
-
-        # Provision mysql relation to exchange information
- self.mysql_client = MysqlClient(self, "mysql")
- self.framework.observe(self.on["mysql"].relation_changed, self.configure_pod)
- self.framework.observe(self.on["mysql"].relation_broken, self.configure_pod)
-
- # Register relation to provide a Scraping Target
- self.scrape_target = PrometheusScrapeTarget(self, "prometheus-scrape")
- self.framework.observe(
- self.on["prometheus-scrape"].relation_joined, self._publish_scrape_info
- )
-
-        # Register relation to provide a Dashboard Target
- self.dashboard_target = GrafanaDashboardTarget(self, "grafana-dashboard")
- self.framework.observe(
- self.on["grafana-dashboard"].relation_joined, self._publish_dashboard_info
- )
-
- def _publish_scrape_info(self, event) -> NoReturn:
- """Publishes scraping information for Prometheus.
-
- Args:
- event (EventBase): Prometheus relation event.
- """
- if self.unit.is_leader():
- hostname = (
- urlparse(self.model.config["site_url"]).hostname
- if self.model.config["site_url"]
- else self.model.app.name
- )
- port = str(PORT)
- if self.model.config.get("site_url", "").startswith("https://"):
- port = "443"
- elif self.model.config.get("site_url", "").startswith("http://"):
- port = "80"
-
- self.scrape_target.publish_info(
- hostname=hostname,
- port=port,
- metrics_path="/metrics",
- scrape_interval="30s",
- scrape_timeout="15s",
- )
-
- def _publish_dashboard_info(self, event) -> NoReturn:
- """Publish dashboards for Grafana.
-
- Args:
- event (EventBase): Grafana relation event.
- """
- if self.unit.is_leader():
- self.dashboard_target.publish_info(
- name="osm-mysql",
- dashboard=Path("templates/mysql_exporter_dashboard.json").read_text(),
- )
-
- def _check_missing_dependencies(self, config: ConfigModel):
- """Check if there is any relation missing.
-
- Args:
- config (ConfigModel): object with configuration information.
-
- Raises:
-            RelationsMissing: if mysql is missing.
- """
- missing_relations = []
-
- if not config.mysql_uri and self.mysql_client.is_missing_data_in_unit():
- missing_relations.append("mysql")
-
- if missing_relations:
- raise RelationsMissing(missing_relations)
-
- def build_pod_spec(self, image_info):
- """Build the PodSpec to be used.
-
- Args:
- image_info (str): container image information.
-
- Returns:
- Dict: PodSpec information.
- """
- # Validate config
- config = ConfigModel(**dict(self.config))
-
- if config.mysql_uri and not self.mysql_client.is_missing_data_in_unit():
- raise Exception("Mysql data cannot be provided via config and relation")
-
- # Check relations
- self._check_missing_dependencies(config)
-
- data_source = (
- f'{config.mysql_uri.replace("mysql://", "").replace("@", "@(").split("/")[0]})/'
- if config.mysql_uri
- else f"root:{self.mysql_client.root_password}@({self.mysql_client.host}:{self.mysql_client.port})/"
- )
-
- # Create Builder for the PodSpec
- pod_spec_builder = PodSpecV3Builder(
- enable_security_context=config.security_context
- )
-
- # Add secrets to the pod
- mysql_secret_name = f"{self.app.name}-mysql-secret"
- pod_spec_builder.add_secret(
- mysql_secret_name,
- {"data_source": data_source},
- )
-
- # Build container
- container_builder = ContainerV3Builder(
- self.app.name,
- image_info,
- config.image_pull_policy,
- run_as_non_root=config.security_context,
- )
- container_builder.add_port(name="exporter", port=PORT)
- container_builder.add_http_readiness_probe(
- path="/api/health",
- port=PORT,
- initial_delay_seconds=10,
- period_seconds=10,
- timeout_seconds=5,
- success_threshold=1,
- failure_threshold=3,
- )
- container_builder.add_http_liveness_probe(
- path="/api/health",
- port=PORT,
- initial_delay_seconds=60,
- timeout_seconds=30,
- failure_threshold=10,
- )
- container_builder.add_secret_envs(
- mysql_secret_name, {"DATA_SOURCE_NAME": "data_source"}
- )
-
- container = container_builder.build()
-
- # Add container to PodSpec
- pod_spec_builder.add_container(container)
-
- # Add Pod restart policy
- restart_policy = PodRestartPolicy()
-        restart_policy.add_secrets(secret_names=(mysql_secret_name,))
- pod_spec_builder.set_restart_policy(restart_policy)
-
- # Add ingress resources to PodSpec if site url exists
- if config.site_url:
- parsed = urlparse(config.site_url)
- annotations = {}
- if config.ingress_class:
- annotations["kubernetes.io/ingress.class"] = config.ingress_class
- ingress_resource_builder = IngressResourceV3Builder(
- f"{self.app.name}-ingress", annotations
- )
-
- if config.ingress_whitelist_source_range:
- annotations[
- "nginx.ingress.kubernetes.io/whitelist-source-range"
- ] = config.ingress_whitelist_source_range
-
- if config.cluster_issuer:
- annotations["cert-manager.io/cluster-issuer"] = config.cluster_issuer
-
- if parsed.scheme == "https":
- ingress_resource_builder.add_tls(
- [parsed.hostname], config.tls_secret_name
- )
- else:
- annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
-
- ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT)
- ingress_resource = ingress_resource_builder.build()
- pod_spec_builder.add_ingress_resource(ingress_resource)
-
- return pod_spec_builder.build()
-
-
-if __name__ == "__main__":
- main(MysqlExporterCharm)
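
The data_source rewrite in build_pod_spec() above is compact and easy to misread. Here is a minimal sketch of the same transformation, assuming a purely hypothetical mysql_uri value; the helper name and example credentials are illustrative and not part of the charm:

    # Sketch only: reproduces the mysql_uri -> DATA_SOURCE_NAME rewrite from
    # build_pod_spec() above. The example URI below is hypothetical.
    def uri_to_dsn(mysql_uri: str) -> str:
        # "mysql://user:pass@host:3306/db" becomes "user:pass@(host:3306)/",
        # the DSN form the exporter reads from DATA_SOURCE_NAME.
        return f'{mysql_uri.replace("mysql://", "").replace("@", "@(").split("/")[0]})/'

    assert uri_to_dsn("mysql://root:secret@mariadb:3306/osm") == "root:secret@(mariadb:3306)/"
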
diff --git a/installers/charm/mysqld-exporter/src/pod_spec.py b/installers/charm/mysqld-exporter/src/pod_spec.py
deleted file mode 100644
index 8068be7f..00000000
--- a/installers/charm/mysqld-exporter/src/pod_spec.py
+++ /dev/null
@@ -1,299 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-from ipaddress import ip_network
-import logging
-from typing import Any, Dict, List
-from urllib.parse import urlparse
-
-logger = logging.getLogger(__name__)
-
-
-def _validate_ip_network(network: str) -> bool:
- """Validate IP network.
-
- Args:
- network (str): IP network range.
-
- Returns:
- bool: True if valid, false otherwise.
- """
- if not network:
- return True
-
- try:
- ip_network(network)
- except ValueError:
- return False
-
- return True
-
-
-def _validate_data(config_data: Dict[str, Any], relation_data: Dict[str, Any]) -> bool:
- """Validates passed information.
-
- Args:
- config_data (Dict[str, Any]): configuration information.
- relation_data (Dict[str, Any]): relation information
-
- Raises:
- ValueError: when config and/or relation data is not valid.
- """
- config_validators = {
- "site_url": lambda value, _: isinstance(value, str)
- if value is not None
- else True,
- "cluster_issuer": lambda value, _: isinstance(value, str)
- if value is not None
- else True,
- "ingress_whitelist_source_range": lambda value, _: _validate_ip_network(value),
- "tls_secret_name": lambda value, _: isinstance(value, str)
- if value is not None
- else True,
- }
- relation_validators = {
- "mysql_host": lambda value, _: isinstance(value, str) and len(value) > 0,
- "mysql_port": lambda value, _: isinstance(value, str) and int(value) > 0,
- "mysql_user": lambda value, _: isinstance(value, str) and len(value) > 0,
- "mysql_password": lambda value, _: isinstance(value, str) and len(value) > 0,
- "mysql_root_password": lambda value, _: isinstance(value, str)
- and len(value) > 0,
- }
- problems = []
-
- for key, validator in config_validators.items():
- valid = validator(config_data.get(key), config_data)
-
- if not valid:
- problems.append(key)
-
- for key, validator in relation_validators.items():
- valid = validator(relation_data.get(key), relation_data)
-
- if not valid:
- problems.append(key)
-
- if len(problems) > 0:
- raise ValueError("Errors found in: {}".format(", ".join(problems)))
-
- return True
-
-
-def _make_pod_ports(port: int) -> List[Dict[str, Any]]:
- """Generate pod ports details.
-
- Args:
- port (int): port to expose.
-
- Returns:
- List[Dict[str, Any]]: pod port details.
- """
- return [{"name": "mysqld-exporter", "containerPort": port, "protocol": "TCP"}]
-
-
-def _make_pod_envconfig(
- config: Dict[str, Any], relation_state: Dict[str, Any]
-) -> Dict[str, Any]:
- """Generate pod environment configuration.
-
- Args:
- config (Dict[str, Any]): configuration information.
- relation_state (Dict[str, Any]): relation state information.
-
- Returns:
- Dict[str, Any]: pod environment configuration.
- """
- envconfig = {
- "DATA_SOURCE_NAME": "root:{mysql_root_password}@({mysql_host}:{mysql_port})/".format(
- **relation_state
- )
- }
-
- return envconfig
-
-
-def _make_pod_ingress_resources(
- config: Dict[str, Any], app_name: str, port: int
-) -> List[Dict[str, Any]]:
- """Generate pod ingress resources.
-
- Args:
- config (Dict[str, Any]): configuration information.
- app_name (str): application name.
- port (int): port to expose.
-
- Returns:
- List[Dict[str, Any]]: pod ingress resources.
- """
- site_url = config.get("site_url")
-
- if not site_url:
- return
-
- parsed = urlparse(site_url)
-
- if not parsed.scheme.startswith("http"):
- return
-
- ingress_whitelist_source_range = config["ingress_whitelist_source_range"]
- cluster_issuer = config["cluster_issuer"]
-
- annotations = {}
-
- if ingress_whitelist_source_range:
- annotations[
- "nginx.ingress.kubernetes.io/whitelist-source-range"
- ] = ingress_whitelist_source_range
-
- if cluster_issuer:
- annotations["cert-manager.io/cluster-issuer"] = cluster_issuer
-
- ingress_spec_tls = None
-
- if parsed.scheme == "https":
- ingress_spec_tls = [{"hosts": [parsed.hostname]}]
- tls_secret_name = config["tls_secret_name"]
- if tls_secret_name:
- ingress_spec_tls[0]["secretName"] = tls_secret_name
- else:
- annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
-
- ingress = {
- "name": "{}-ingress".format(app_name),
- "annotations": annotations,
- "spec": {
- "rules": [
- {
- "host": parsed.hostname,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ]
- },
- }
- if ingress_spec_tls:
- ingress["spec"]["tls"] = ingress_spec_tls
-
- return [ingress]
-
-
-def _make_readiness_probe(port: int) -> Dict[str, Any]:
- """Generate readiness probe.
-
- Args:
- port (int): service port.
-
- Returns:
- Dict[str, Any]: readiness probe.
- """
- return {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "periodSeconds": 10,
- "timeoutSeconds": 5,
- "successThreshold": 1,
- "failureThreshold": 3,
- }
-
-
-def _make_liveness_probe(port: int) -> Dict[str, Any]:
- """Generate liveness probe.
-
- Args:
- port (int): service port.
-
- Returns:
- Dict[str, Any]: liveness probe.
- """
- return {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 60,
- "timeoutSeconds": 30,
- "failureThreshold": 10,
- }
-
-
-def make_pod_spec(
- image_info: Dict[str, str],
- config: Dict[str, Any],
- relation_state: Dict[str, Any],
- app_name: str = "mysqld-exporter",
- port: int = 9104,
-) -> Dict[str, Any]:
- """Generate the pod spec information.
-
- Args:
- image_info (Dict[str, str]): Object provided by
- OCIImageResource("image").fetch().
- config (Dict[str, Any]): Configuration information.
- relation_state (Dict[str, Any]): Relation state information.
-        app_name (str, optional): Application name. Defaults to "mysqld-exporter".
-        port (int, optional): Port for the container. Defaults to 9104.
-
- Returns:
- Dict[str, Any]: Pod spec dictionary for the charm.
- """
- if not image_info:
- return None
-
- _validate_data(config, relation_state)
-
- ports = _make_pod_ports(port)
- env_config = _make_pod_envconfig(config, relation_state)
- readiness_probe = _make_readiness_probe(port)
- liveness_probe = _make_liveness_probe(port)
- ingress_resources = _make_pod_ingress_resources(config, app_name, port)
-
- return {
- "version": 3,
- "containers": [
- {
- "name": app_name,
- "imageDetails": image_info,
- "imagePullPolicy": "Always",
- "ports": ports,
- "envConfig": env_config,
- "kubernetes": {
- "readinessProbe": readiness_probe,
- "livenessProbe": liveness_probe,
- },
- }
- ],
- "kubernetesResources": {
- "ingressResources": ingress_resources or [],
- },
- }
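
A minimal usage sketch of the make_pod_spec() helper above; every value below (image tag, hostname, credentials) is illustrative only and not taken from a real deployment:

    # Sketch only: exercises make_pod_spec() with illustrative inputs.
    import pod_spec

    spec = pod_spec.make_pod_spec(
        image_info={"upstream-source": "bitnami/mysqld-exporter:0.14.0"},
        config={
            "site_url": "",
            "cluster_issuer": "",
            "ingress_whitelist_source_range": "",
            "tls_secret_name": "",
        },
        relation_state={
            "mysql_host": "mariadb",
            "mysql_port": "3306",
            "mysql_user": "exporter",
            "mysql_password": "secret",
            "mysql_root_password": "rootsecret",
        },
        app_name="mysqld-exporter",
        port=9104,
    )

    # The exporter connects as root using the DSN built by _make_pod_envconfig().
    dsn = spec["containers"][0]["envConfig"]["DATA_SOURCE_NAME"]
    assert dsn == "root:rootsecret@(mariadb:3306)/"
    # No site_url configured, so no ingress resources are generated.
    assert spec["kubernetesResources"]["ingressResources"] == []
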
diff --git a/installers/charm/mysqld-exporter/templates/mysql_exporter_dashboard.json b/installers/charm/mysqld-exporter/templates/mysql_exporter_dashboard.json
deleted file mode 100644
index 9f9acac3..00000000
--- a/installers/charm/mysqld-exporter/templates/mysql_exporter_dashboard.json
+++ /dev/null
@@ -1,1145 +0,0 @@
-{
- "annotations": {
- "list": [
- {
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "type": "dashboard"
- }
- ]
- },
- "description": "Mysql dashboard",
- "editable": true,
- "gnetId": 6239,
- "graphTooltip": 0,
- "id": 34,
- "iteration": 1569307668513,
- "links": [],
- "panels": [
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 0
- },
- "id": 17,
- "panels": [],
- "title": "Global status",
- "type": "row"
- },
- {
- "cacheTimeout": null,
- "colorBackground": true,
- "colorValue": false,
- "colors": [
- "#bf1b00",
- "#508642",
- "#ef843c"
- ],
- "datasource": "prometheus - Juju generated source",
- "format": "none",
- "gauge": {
- "maxValue": 1,
- "minValue": 0,
- "show": false,
- "thresholdLabels": false,
- "thresholdMarkers": true
- },
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 0,
- "y": 1
- },
- "id": 11,
- "interval": null,
- "links": [],
- "mappingType": 1,
- "mappingTypes": [
- {
- "name": "value to text",
- "value": 1
- },
- {
- "name": "range to text",
- "value": 2
- }
- ],
- "maxDataPoints": 100,
- "nullPointMode": "connected",
- "nullText": null,
- "options": {},
- "postfix": "",
- "postfixFontSize": "50%",
- "prefix": "",
- "prefixFontSize": "50%",
- "rangeMaps": [
- {
- "from": "null",
- "text": "N/A",
- "to": "null"
- }
- ],
- "sparkline": {
- "fillColor": "rgba(31, 118, 189, 0.18)",
- "full": true,
- "lineColor": "rgb(31, 120, 193)",
- "show": true
- },
- "tableColumn": "",
- "targets": [
- {
- "expr": "mysql_up{release=\"$release\"}",
- "format": "time_series",
- "intervalFactor": 1,
- "refId": "A"
- }
- ],
- "thresholds": "1,2",
- "title": "Instance Up",
- "type": "singlestat",
- "valueFontSize": "80%",
- "valueMaps": [
- {
- "op": "=",
- "text": "N/A",
- "value": "null"
- }
- ],
- "valueName": "current"
- },
- {
- "cacheTimeout": null,
- "colorBackground": true,
- "colorValue": false,
- "colors": [
- "#d44a3a",
- "rgba(237, 129, 40, 0.89)",
- "#508642"
- ],
- "datasource": "prometheus - Juju generated source",
- "format": "s",
- "gauge": {
- "maxValue": 100,
- "minValue": 0,
- "show": false,
- "thresholdLabels": false,
- "thresholdMarkers": true
- },
- "gridPos": {
- "h": 7,
- "w": 6,
- "x": 6,
- "y": 1
- },
- "id": 15,
- "interval": null,
- "links": [],
- "mappingType": 1,
- "mappingTypes": [
- {
- "name": "value to text",
- "value": 1
- },
- {
- "name": "range to text",
- "value": 2
- }
- ],
- "maxDataPoints": 100,
- "nullPointMode": "connected",
- "nullText": null,
- "options": {},
- "postfix": "",
- "postfixFontSize": "50%",
- "prefix": "",
- "prefixFontSize": "50%",
- "rangeMaps": [
- {
- "from": "null",
- "text": "N/A",
- "to": "null"
- }
- ],
- "sparkline": {
- "fillColor": "rgba(31, 118, 189, 0.18)",
- "full": false,
- "lineColor": "rgb(31, 120, 193)",
- "show": true
- },
- "tableColumn": "",
- "targets": [
- {
- "expr": "mysql_global_status_uptime{release=\"$release\"}",
- "format": "time_series",
- "intervalFactor": 1,
- "refId": "A"
- }
- ],
- "thresholds": "25200,32400",
- "title": "Uptime",
- "type": "singlestat",
- "valueFontSize": "80%",
- "valueMaps": [
- {
- "op": "=",
- "text": "N/A",
- "value": "null"
- }
- ],
- "valueName": "current"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 1
- },
- "id": 29,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "mysql_global_status_max_used_connections{release=\"$release\"}",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "current",
- "refId": "A"
- },
- {
- "expr": "mysql_global_variables_max_connections{release=\"$release\"}",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "Max",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Mysql Connections",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 8
- },
- "id": 19,
- "panels": [],
- "title": "I/O",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 9
- },
- "id": 5,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "write",
- "transform": "negative-Y"
- }
- ],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "irate(mysql_global_status_innodb_data_reads{release=\"$release\"}[10m])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "reads",
- "refId": "A"
- },
- {
- "expr": "irate(mysql_global_status_innodb_data_writes{release=\"$release\"}[10m])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "write",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "mysql disk reads vs writes",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 9
- },
- "id": 9,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [
- {
- "alias": "/sent/",
- "transform": "negative-Y"
- }
- ],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "irate(mysql_global_status_bytes_received{release=\"$release\"}[5m])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "received",
- "refId": "A"
- },
- {
- "expr": "irate(mysql_global_status_bytes_sent{release=\"$release\"}[5m])",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "sent",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "mysql network received vs sent",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 0,
- "y": 18
- },
- "id": 2,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "irate(mysql_global_status_commands_total{release=\"$release\"}[5m]) > 0",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "{{ command }} - {{ release }}",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Query rates",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 12,
- "x": 12,
- "y": 18
- },
- "id": 25,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "mysql_global_status_threads_running{release=\"$release\"} ",
- "format": "time_series",
- "intervalFactor": 1,
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Running Threads",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "decimals": null,
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": "15",
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 25
- },
- "id": 21,
- "panels": [],
- "title": "Errors",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "description": "The number of connections that were aborted because the client died without closing the connection properly.",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 26
- },
- "id": 13,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "mysql_global_status_aborted_clients{release=\"$release\"}",
- "format": "time_series",
- "intervalFactor": 1,
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Aborted clients",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "description": "The number of failed attempts to connect to the MySQL server.",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 26
- },
- "id": 4,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "mysql_global_status_aborted_connects{release=\"$release\"}",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "",
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "mysql aborted Connects",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "collapsed": false,
- "gridPos": {
- "h": 1,
- "w": 24,
- "x": 0,
- "y": 35
- },
- "id": 23,
- "panels": [],
- "title": "Disk usage",
- "type": "row"
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 0,
- "y": 36
- },
- "id": 27,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": true,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(mysql_info_schema_table_size{component=\"data_length\",release=\"$release\"})",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "Tables",
- "refId": "A"
- },
- {
- "expr": "sum(mysql_info_schema_table_size{component=\"index_length\",release=\"$release\"})",
- "format": "time_series",
- "intervalFactor": 1,
- "legendFormat": "Indexes",
- "refId": "B"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Disk usage tables / indexes",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "decbytes",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "prometheus - Juju generated source",
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 9,
- "w": 12,
- "x": 12,
- "y": 36
- },
- "id": 7,
- "legend": {
- "avg": false,
- "current": false,
- "max": false,
- "min": false,
- "show": false,
- "total": false,
- "values": false
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "dataLinks": []
- },
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "expr": "sum(mysql_info_schema_table_rows{release=\"$release\"})",
- "format": "time_series",
- "intervalFactor": 1,
- "refId": "A"
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Sum of all rows",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "decimals": null,
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "schemaVersion": 19,
- "style": "dark",
- "tags": [
- ],
- "templating": {
- "list": [
- {
- "allValue": null,
- "current": {
- "isNone": true,
- "text": "None",
- "value": ""
- },
- "datasource": "prometheus - Juju generated source",
- "definition": "",
- "hide": 0,
- "includeAll": false,
- "label": null,
- "multi": false,
- "name": "release",
- "options": [],
- "query": "label_values(mysql_up,release)",
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "sort": 0,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
- }
- ]
- },
- "time": {
- "from": "now-1h",
- "to": "now"
- },
- "timepicker": {
- "refresh_intervals": [
- "5s",
- "10s",
- "30s",
- "1m",
- "5m",
- "15m",
- "30m",
- "1h",
- "2h",
- "1d"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "",
- "title": "Mysql",
- "uid": "6-kPlS7ik",
- "version": 1
-}
diff --git a/installers/charm/mysqld-exporter/tests/__init__.py b/installers/charm/mysqld-exporter/tests/__init__.py
deleted file mode 100644
index 90dc417c..00000000
--- a/installers/charm/mysqld-exporter/tests/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-"""Init mocking for unit tests."""
-
-import sys
-
-import mock
-
-
-class OCIImageResourceErrorMock(Exception):
- pass
-
-
-sys.path.append("src")
-
-oci_image = mock.MagicMock()
-oci_image.OCIImageResourceError = OCIImageResourceErrorMock
-sys.modules["oci_image"] = oci_image
-sys.modules["oci_image"].OCIImageResource().fetch.return_value = {}
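
For context, the removed `tests/__init__.py` relies on a standard stubbing trick: it plants a `MagicMock` in `sys.modules` before any test module imports the charm, so `import oci_image` resolves to the stub rather than the real package. A self-contained sketch of the same pattern is below; the stubbed attributes mirror the removed file, and the trailing asserts are only there to show the effect.

```python
"""Sketch: stub a third-party module before importing code that depends on it."""

import sys
from unittest import mock


class OCIImageResourceErrorMock(Exception):
    """Stand-in for oci_image.OCIImageResourceError."""


# Register the stand-in module so any later `import oci_image` resolves to it.
oci_image_stub = mock.MagicMock()
oci_image_stub.OCIImageResourceError = OCIImageResourceErrorMock
oci_image_stub.OCIImageResource.return_value.fetch.return_value = {}
sys.modules["oci_image"] = oci_image_stub

# Code imported from here on sees the stub, not the real oci_image package.
import oci_image  # noqa: E402

assert oci_image.OCIImageResource().fetch() == {}
assert issubclass(oci_image.OCIImageResourceError, Exception)
```

The assignment to `sys.modules` has to happen before `charm.py` is imported, which is why the removed repository placed it in the test package's `__init__.py`.
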
diff --git a/installers/charm/mysqld-exporter/tests/test_charm.py b/installers/charm/mysqld-exporter/tests/test_charm.py
deleted file mode 100644
index ddaacaf3..00000000
--- a/installers/charm/mysqld-exporter/tests/test_charm.py
+++ /dev/null
@@ -1,595 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-import sys
-from typing import NoReturn
-import unittest
-
-from charm import MysqlExporterCharm
-from ops.model import ActiveStatus, BlockedStatus
-from ops.testing import Harness
-
-
-class TestCharm(unittest.TestCase):
- """Mysql Exporter Charm unit tests."""
-
- def setUp(self) -> NoReturn:
- """Test setup"""
- self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
- self.harness = Harness(MysqlExporterCharm)
- self.harness.set_leader(is_leader=True)
- self.harness.begin()
- self.config = {
- "ingress_whitelist_source_range": "",
- "tls_secret_name": "",
- "site_url": "https://mysql-exporter.192.168.100.100.nip.io",
- "cluster_issuer": "vault-issuer",
- }
- self.harness.update_config(self.config)
-
- def test_config_changed_no_relations(
- self,
- ) -> NoReturn:
- """Test config-changed with no relations."""
-
- self.harness.charm.on.config_changed.emit()
-
- # Assertions
- self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
- print(self.harness.charm.unit.status.message)
- self.assertTrue(
- all(
- relation in self.harness.charm.unit.status.message
- for relation in ["mysql"]
- )
- )
-
- def test_config_changed_non_leader(
- self,
- ) -> NoReturn:
- """Test config-changed when the unit is not the leader."""
- self.harness.set_leader(is_leader=False)
- self.harness.charm.on.config_changed.emit()
-
- # Assertions
- self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
-
- def test_with_relations(
- self,
- ) -> NoReturn:
- "Test with relations"
- self.initialize_mysql_relation()
-
- # Verifying status
- self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
- def test_with_config(
- self,
- ) -> NoReturn:
- "Test with config"
- self.initialize_mysql_relation()
-
- # Verifying status
- self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
- def test_mysql_exception_relation_and_config(
- self,
- ) -> NoReturn:
- self.initialize_mysql_config()
- self.initialize_mysql_relation()
-
- # Verifying status
- self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
- def initialize_mysql_relation(self):
- mysql_relation_id = self.harness.add_relation("mysql", "mysql")
- self.harness.add_relation_unit(mysql_relation_id, "mysql/0")
- self.harness.update_relation_data(
- mysql_relation_id,
- "mysql/0",
- {
- "user": "user",
- "password": "pass",
- "host": "host",
- "port": "1234",
- "database": "pol",
- "root_password": "root_password",
- },
- )
-
- def initialize_mysql_config(self):
- self.harness.update_config({"mysql_uri": "mysql://user:pass@mysql-host:3306"})
-
-
-if __name__ == "__main__":
- unittest.main()
-
-
-# class TestCharm(unittest.TestCase):
-# """Mysql Exporter Charm unit tests."""
-#
-# def setUp(self) -> NoReturn:
-# """Test setup"""
-# self.harness = Harness(MysqldExporterCharm)
-# self.harness.set_leader(is_leader=True)
-# self.harness.begin()
-#
-# def test_on_start_without_relations(self) -> NoReturn:
-# """Test installation without any relation."""
-# self.harness.charm.on.start.emit()
-#
-# # Verifying status
-# self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-#
-# # Verifying status message
-# self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-# self.assertTrue(
-# self.harness.charm.unit.status.message.startswith("Waiting for ")
-# )
-# self.assertIn("mysql", self.harness.charm.unit.status.message)
-# self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
-#
-# def test_on_start_with_relations_without_http(self) -> NoReturn:
-# """Test deployment."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "mysqld-exporter",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "mysqld-exporter",
-# "containerPort": 9104,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {"DATA_SOURCE_NAME": "root:rootpw@(mysql:3306)/"},
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9104,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9104,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {"ingressResources": []},
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# # Initializing the mysql relation
-# relation_id = self.harness.add_relation("mysql", "mysql")
-# self.harness.add_relation_unit(relation_id, "mysql/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "mysql/0",
-# {
-# "host": "mysql",
-# "port": "3306",
-# "user": "mano",
-# "password": "manopw",
-# "root_password": "rootpw",
-# },
-# )
-#
-# # Verifying status
-# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-#
-# pod_spec, _ = self.harness.get_pod_spec()
-#
-# self.assertDictEqual(expected_result, pod_spec)
-#
-# def test_ingress_resources_with_http(self) -> NoReturn:
-# """Test ingress resources with HTTP."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "mysqld-exporter",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "mysqld-exporter",
-# "containerPort": 9104,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {"DATA_SOURCE_NAME": "root:rootpw@(mysql:3306)/"},
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9104,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9104,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {
-# "ingressResources": [
-# {
-# "name": "mysqld-exporter-ingress",
-# "annotations": {
-# "nginx.ingress.kubernetes.io/ssl-redirect": "false",
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": "mysqld-exporter",
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": "mysqld-exporter",
-# "servicePort": 9104,
-# },
-# }
-# ]
-# },
-# }
-# ]
-# },
-# }
-# ],
-# },
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# # Initializing the mysql relation
-# relation_id = self.harness.add_relation("mysql", "mysql")
-# self.harness.add_relation_unit(relation_id, "mysql/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "mysql/0",
-# {
-# "host": "mysql",
-# "port": "3306",
-# "user": "mano",
-# "password": "manopw",
-# "root_password": "rootpw",
-# },
-# )
-#
-# self.harness.update_config({"site_url": "http://mysqld-exporter"})
-#
-# pod_spec, _ = self.harness.get_pod_spec()
-#
-# self.assertDictEqual(expected_result, pod_spec)
-#
-# def test_ingress_resources_with_https(self) -> NoReturn:
-# """Test ingress resources with HTTPS."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "mysqld-exporter",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "mysqld-exporter",
-# "containerPort": 9104,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {"DATA_SOURCE_NAME": "root:rootpw@(mysql:3306)/"},
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9104,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9104,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {
-# "ingressResources": [
-# {
-# "name": "mysqld-exporter-ingress",
-# "annotations": {},
-# "spec": {
-# "rules": [
-# {
-# "host": "mysqld-exporter",
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": "mysqld-exporter",
-# "servicePort": 9104,
-# },
-# }
-# ]
-# },
-# }
-# ],
-# "tls": [
-# {
-# "hosts": ["mysqld-exporter"],
-# "secretName": "mysqld-exporter",
-# }
-# ],
-# },
-# }
-# ],
-# },
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# # Initializing the mysql relation
-# relation_id = self.harness.add_relation("mysql", "mysql")
-# self.harness.add_relation_unit(relation_id, "mysql/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "mysql/0",
-# {
-# "host": "mysql",
-# "port": "3306",
-# "user": "mano",
-# "password": "manopw",
-# "root_password": "rootpw",
-# },
-# )
-#
-# self.harness.update_config(
-# {
-# "site_url": "https://mysqld-exporter",
-# "tls_secret_name": "mysqld-exporter",
-# }
-# )
-#
-# pod_spec, _ = self.harness.get_pod_spec()
-#
-# self.assertDictEqual(expected_result, pod_spec)
-#
-# def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn:
-# """Test ingress resources with HTTPS and ingress whitelist."""
-# expected_result = {
-# "version": 3,
-# "containers": [
-# {
-# "name": "mysqld-exporter",
-# "imageDetails": self.harness.charm.image.fetch(),
-# "imagePullPolicy": "Always",
-# "ports": [
-# {
-# "name": "mysqld-exporter",
-# "containerPort": 9104,
-# "protocol": "TCP",
-# }
-# ],
-# "envConfig": {"DATA_SOURCE_NAME": "root:rootpw@(mysql:3306)/"},
-# "kubernetes": {
-# "readinessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9104,
-# },
-# "initialDelaySeconds": 10,
-# "periodSeconds": 10,
-# "timeoutSeconds": 5,
-# "successThreshold": 1,
-# "failureThreshold": 3,
-# },
-# "livenessProbe": {
-# "httpGet": {
-# "path": "/api/health",
-# "port": 9104,
-# },
-# "initialDelaySeconds": 60,
-# "timeoutSeconds": 30,
-# "failureThreshold": 10,
-# },
-# },
-# },
-# ],
-# "kubernetesResources": {
-# "ingressResources": [
-# {
-# "name": "mysqld-exporter-ingress",
-# "annotations": {
-# "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0",
-# },
-# "spec": {
-# "rules": [
-# {
-# "host": "mysqld-exporter",
-# "http": {
-# "paths": [
-# {
-# "path": "/",
-# "backend": {
-# "serviceName": "mysqld-exporter",
-# "servicePort": 9104,
-# },
-# }
-# ]
-# },
-# }
-# ],
-# "tls": [
-# {
-# "hosts": ["mysqld-exporter"],
-# "secretName": "mysqld-exporter",
-# }
-# ],
-# },
-# }
-# ],
-# },
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# # Initializing the mysql relation
-# relation_id = self.harness.add_relation("mysql", "mysql")
-# self.harness.add_relation_unit(relation_id, "mysql/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "mysql/0",
-# {
-# "host": "mysql",
-# "port": "3306",
-# "user": "mano",
-# "password": "manopw",
-# "root_password": "rootpw",
-# },
-# )
-#
-# self.harness.update_config(
-# {
-# "site_url": "https://mysqld-exporter",
-# "tls_secret_name": "mysqld-exporter",
-# "ingress_whitelist_source_range": "0.0.0.0/0",
-# }
-# )
-#
-# pod_spec, _ = self.harness.get_pod_spec()
-#
-# self.assertDictEqual(expected_result, pod_spec)
-#
-# def test_on_mysql_unit_relation_changed(self) -> NoReturn:
-# """Test to see if mysql relation is updated."""
-# self.harness.charm.on.start.emit()
-#
-# relation_id = self.harness.add_relation("mysql", "mysql")
-# self.harness.add_relation_unit(relation_id, "mysql/0")
-# self.harness.update_relation_data(
-# relation_id,
-# "mysql/0",
-# {
-# "host": "mysql",
-# "port": "3306",
-# "user": "mano",
-# "password": "manopw",
-# "root_password": "rootpw",
-# },
-# )
-#
-# # Verifying status
-# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-#
-# def test_publish_target_info(self) -> NoReturn:
-# """Test to see if target relation is updated."""
-# expected_result = {
-# "hostname": "mysqld-exporter",
-# "port": "9104",
-# "metrics_path": "/metrics",
-# "scrape_interval": "30s",
-# "scrape_timeout": "15s",
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# relation_id = self.harness.add_relation("prometheus-scrape", "prometheus")
-# self.harness.add_relation_unit(relation_id, "prometheus/0")
-# relation_data = self.harness.get_relation_data(relation_id, "mysqld-exporter/0")
-#
-# self.assertDictEqual(expected_result, relation_data)
-#
-# def test_publish_scrape_info_with_site_url(self) -> NoReturn:
-# """Test to see if target relation is updated."""
-# expected_result = {
-# "hostname": "mysqld-exporter-osm",
-# "port": "80",
-# "metrics_path": "/metrics",
-# "scrape_interval": "30s",
-# "scrape_timeout": "15s",
-# }
-#
-# self.harness.charm.on.start.emit()
-#
-# self.harness.update_config({"site_url": "http://mysqld-exporter-osm"})
-#
-# relation_id = self.harness.add_relation("prometheus-scrape", "prometheus")
-# self.harness.add_relation_unit(relation_id, "prometheus/0")
-# relation_data = self.harness.get_relation_data(relation_id, "mysqld-exporter/0")
-#
-# self.assertDictEqual(expected_result, relation_data)
-#
-# def test_publish_dashboard_info(self) -> NoReturn:
-# """Test to see if dashboard relation is updated."""
-# self.harness.charm.on.start.emit()
-#
-# relation_id = self.harness.add_relation("grafana-dashboard", "grafana")
-# self.harness.add_relation_unit(relation_id, "grafana/0")
-# relation_data = self.harness.get_relation_data(relation_id, "mysqld-exporter/0")
-#
-# self.assertTrue("dashboard" in relation_data)
-# self.assertTrue(len(relation_data["dashboard"]) > 0)
-# self.assertEqual(relation_data["name"], "osm-mysql")
-#
-#
-# if __name__ == "__main__":
-# unittest.main()
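
The deleted tests above follow the `ops.testing.Harness` pattern: build a harness for the charm class, mark the unit as leader, call `begin()`, then drive hooks by adding relations and injecting relation data before asserting on `unit.status`. The condensed, runnable sketch below shows that flow end to end; `DemoCharm` and its metadata are illustrative stand-ins, not the actual `MysqlExporterCharm`.

```python
"""Sketch of the Harness-based test flow used by the removed charm tests."""

import unittest

from ops.charm import CharmBase
from ops.model import ActiveStatus, BlockedStatus
from ops.testing import Harness

METADATA = """
name: demo-exporter
requires:
  mysql:
    interface: mysql
"""


class DemoCharm(CharmBase):
    """Toy charm: blocked until a mysql relation provides a host."""

    def __init__(self, *args):
        super().__init__(*args)
        self.framework.observe(self.on.config_changed, self._update_status)
        self.framework.observe(self.on.mysql_relation_changed, self._update_status)

    def _update_status(self, _event):
        for relation in self.model.relations["mysql"]:
            for unit in relation.units:
                if relation.data[unit].get("host"):
                    self.unit.status = ActiveStatus()
                    return
        self.unit.status = BlockedStatus("Waiting for mysql relation")


class TestDemoCharm(unittest.TestCase):
    def setUp(self):
        self.harness = Harness(DemoCharm, meta=METADATA)
        self.harness.set_leader(True)
        self.harness.begin()

    def test_blocked_without_relation(self):
        self.harness.charm.on.config_changed.emit()
        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)

    def test_active_with_relation(self):
        rel_id = self.harness.add_relation("mysql", "mysql")
        self.harness.add_relation_unit(rel_id, "mysql/0")
        self.harness.update_relation_data(
            rel_id, "mysql/0", {"host": "mysql", "port": "3306"}
        )
        self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)


if __name__ == "__main__":
    unittest.main()
```
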
diff --git a/installers/charm/mysqld-exporter/tests/test_pod_spec.py b/installers/charm/mysqld-exporter/tests/test_pod_spec.py
deleted file mode 100644
index a9c29eff..00000000
--- a/installers/charm/mysqld-exporter/tests/test_pod_spec.py
+++ /dev/null
@@ -1,513 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-from typing import NoReturn
-import unittest
-
-import pod_spec
-
-
-class TestPodSpec(unittest.TestCase):
- """Pod spec unit tests."""
-
- def test_make_pod_ports(self) -> NoReturn:
- """Testing make pod ports."""
- port = 9104
-
- expected_result = [
- {
- "name": "mysqld-exporter",
- "containerPort": port,
- "protocol": "TCP",
- }
- ]
-
- pod_ports = pod_spec._make_pod_ports(port)
-
- self.assertListEqual(expected_result, pod_ports)
-
- def test_make_pod_envconfig(self) -> NoReturn:
- """Testing make pod envconfig."""
- config = {}
- relation_state = {
- "mysql_host": "mysql",
- "mysql_port": "3306",
- "mysql_user": "mano",
- "mysql_password": "manopw",
- "mysql_root_password": "rootpw",
- }
-
- expected_result = {
- "DATA_SOURCE_NAME": "root:{mysql_root_password}@({mysql_host}:{mysql_port})/".format(
- **relation_state
- )
- }
-
- pod_envconfig = pod_spec._make_pod_envconfig(config, relation_state)
-
- self.assertDictEqual(expected_result, pod_envconfig)
-
- def test_make_pod_ingress_resources_without_site_url(self) -> NoReturn:
- """Testing make pod ingress resources without site_url."""
- config = {
- "site_url": "",
- "cluster_issuer": "",
- }
- app_name = "mysqld-exporter"
- port = 9104
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertIsNone(pod_ingress_resources)
-
- def test_make_pod_ingress_resources(self) -> NoReturn:
- """Testing make pod ingress resources."""
- config = {
- "site_url": "http://mysqld-exporter",
- "cluster_issuer": "",
- "ingress_whitelist_source_range": "",
- }
- app_name = "mysqld-exporter"
- port = 9104
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {
- "nginx.ingress.kubernetes.io/ssl-redirect": "false",
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ]
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_pod_ingress_resources_with_whitelist_source_range(self) -> NoReturn:
- """Testing make pod ingress resources with whitelist_source_range."""
- config = {
- "site_url": "http://mysqld-exporter",
- "cluster_issuer": "",
- "ingress_whitelist_source_range": "0.0.0.0/0",
- }
- app_name = "mysqld-exporter"
- port = 9104
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {
- "nginx.ingress.kubernetes.io/ssl-redirect": "false",
- "nginx.ingress.kubernetes.io/whitelist-source-range": config[
- "ingress_whitelist_source_range"
- ],
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ]
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_pod_ingress_resources_with_https(self) -> NoReturn:
- """Testing make pod ingress resources with HTTPs."""
- config = {
- "site_url": "https://mysqld-exporter",
- "cluster_issuer": "",
- "ingress_whitelist_source_range": "",
- "tls_secret_name": "",
- }
- app_name = "mysqld-exporter"
- port = 9104
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {},
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ],
- "tls": [{"hosts": [app_name]}],
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_pod_ingress_resources_with_https_tls_secret_name(self) -> NoReturn:
- """Testing make pod ingress resources with HTTPs and TLS secret name."""
- config = {
- "site_url": "https://mysqld-exporter",
- "cluster_issuer": "",
- "ingress_whitelist_source_range": "",
- "tls_secret_name": "secret_name",
- }
- app_name = "mysqld-exporter"
- port = 9104
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {},
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ],
- "tls": [
- {"hosts": [app_name], "secretName": config["tls_secret_name"]}
- ],
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_readiness_probe(self) -> NoReturn:
- """Testing make readiness probe."""
- port = 9104
-
- expected_result = {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "periodSeconds": 10,
- "timeoutSeconds": 5,
- "successThreshold": 1,
- "failureThreshold": 3,
- }
-
- readiness_probe = pod_spec._make_readiness_probe(port)
-
- self.assertDictEqual(expected_result, readiness_probe)
-
- def test_make_liveness_probe(self) -> NoReturn:
- """Testing make liveness probe."""
- port = 9104
-
- expected_result = {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 60,
- "timeoutSeconds": 30,
- "failureThreshold": 10,
- }
-
- liveness_probe = pod_spec._make_liveness_probe(port)
-
- self.assertDictEqual(expected_result, liveness_probe)
-
- def test_make_pod_spec(self) -> NoReturn:
- """Testing make pod spec."""
- image_info = {"upstream-source": "bitnami/mysqld-exporter:latest"}
- config = {
- "site_url": "",
- "cluster_issuer": "",
- }
- relation_state = {
- "mysql_host": "mysql",
- "mysql_port": "3306",
- "mysql_user": "mano",
- "mysql_password": "manopw",
- "mysql_root_password": "rootpw",
- }
- app_name = "mysqld-exporter"
- port = 9104
-
- expected_result = {
- "version": 3,
- "containers": [
- {
- "name": app_name,
- "imageDetails": image_info,
- "imagePullPolicy": "Always",
- "ports": [
- {
- "name": app_name,
- "containerPort": port,
- "protocol": "TCP",
- }
- ],
- "envConfig": {
- "DATA_SOURCE_NAME": "root:{mysql_root_password}@({mysql_host}:{mysql_port})/".format(
- **relation_state
- )
- },
- "kubernetes": {
- "readinessProbe": {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "periodSeconds": 10,
- "timeoutSeconds": 5,
- "successThreshold": 1,
- "failureThreshold": 3,
- },
- "livenessProbe": {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 60,
- "timeoutSeconds": 30,
- "failureThreshold": 10,
- },
- },
- }
- ],
- "kubernetesResources": {"ingressResources": []},
- }
-
- spec = pod_spec.make_pod_spec(
- image_info, config, relation_state, app_name, port
- )
-
- self.assertDictEqual(expected_result, spec)
-
- def test_make_pod_spec_with_ingress(self) -> NoReturn:
- """Testing make pod spec."""
- image_info = {"upstream-source": "bitnami/mysqld-exporter:latest"}
- config = {
- "site_url": "https://mysqld-exporter",
- "cluster_issuer": "",
- "tls_secret_name": "mysqld-exporter",
- "ingress_whitelist_source_range": "0.0.0.0/0",
- }
- relation_state = {
- "mysql_host": "mysql",
- "mysql_port": "3306",
- "mysql_user": "mano",
- "mysql_password": "manopw",
- "mysql_root_password": "rootpw",
- }
- app_name = "mysqld-exporter"
- port = 9104
-
- expected_result = {
- "version": 3,
- "containers": [
- {
- "name": app_name,
- "imageDetails": image_info,
- "imagePullPolicy": "Always",
- "ports": [
- {
- "name": app_name,
- "containerPort": port,
- "protocol": "TCP",
- }
- ],
- "envConfig": {
- "DATA_SOURCE_NAME": "root:{mysql_root_password}@({mysql_host}:{mysql_port})/".format(
- **relation_state
- )
- },
- "kubernetes": {
- "readinessProbe": {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "periodSeconds": 10,
- "timeoutSeconds": 5,
- "successThreshold": 1,
- "failureThreshold": 3,
- },
- "livenessProbe": {
- "httpGet": {
- "path": "/api/health",
- "port": port,
- },
- "initialDelaySeconds": 60,
- "timeoutSeconds": 30,
- "failureThreshold": 10,
- },
- },
- }
- ],
- "kubernetesResources": {
- "ingressResources": [
- {
- "name": "{}-ingress".format(app_name),
- "annotations": {
- "nginx.ingress.kubernetes.io/whitelist-source-range": config.get(
- "ingress_whitelist_source_range"
- ),
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ],
- "tls": [
- {
- "hosts": [app_name],
- "secretName": config.get("tls_secret_name"),
- }
- ],
- },
- }
- ],
- },
- }
-
- spec = pod_spec.make_pod_spec(
- image_info, config, relation_state, app_name, port
- )
-
- self.assertDictEqual(expected_result, spec)
-
- def test_make_pod_spec_without_image_info(self) -> NoReturn:
- """Testing make pod spec without image_info."""
- image_info = None
- config = {
- "site_url": "",
- "cluster_issuer": "",
- }
- relation_state = {
- "mysql_host": "mysql",
- "mysql_port": 3306,
- "mysql_user": "mano",
- "mysql_password": "manopw",
- "mysql_root_password": "rootpw",
- }
- app_name = "mysqld-exporter"
- port = 9104
-
- spec = pod_spec.make_pod_spec(
- image_info, config, relation_state, app_name, port
- )
-
- self.assertIsNone(spec)
-
- def test_make_pod_spec_without_relation_state(self) -> NoReturn:
- """Testing make pod spec without relation_state."""
- image_info = {"upstream-source": "bitnami/mysqld-exporter:latest"}
- config = {
- "site_url": "",
- "cluster_issuer": "",
- }
- relation_state = {}
- app_name = "mysqld-exporter"
- port = 9104
-
- with self.assertRaises(ValueError):
- pod_spec.make_pod_spec(image_info, config, relation_state, app_name, port)
-
-
-if __name__ == "__main__":
- unittest.main()
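
The pod-spec tests above pin down the exact dictionaries that the (also removed) `pod_spec.py` helpers were expected to return. The sketch below reconstructs a few of those helpers purely from the expected values in the tests; the public function names here are illustrative, whereas the real module exposed private `_make_*` helpers with slightly different signatures.

```python
"""Sketch of pod-spec helpers that satisfy the expectations in the removed tests."""

from typing import Any, Dict, List


def make_pod_ports(port: int, app_name: str = "mysqld-exporter") -> List[Dict[str, Any]]:
    """Container port list, as asserted by test_make_pod_ports."""
    return [{"name": app_name, "containerPort": port, "protocol": "TCP"}]


def make_pod_envconfig(relation_state: Dict[str, str]) -> Dict[str, str]:
    """DATA_SOURCE_NAME built from mysql relation data, as asserted by the tests."""
    return {
        "DATA_SOURCE_NAME": "root:{mysql_root_password}@({mysql_host}:{mysql_port})/".format(
            **relation_state
        )
    }


def make_readiness_probe(port: int) -> Dict[str, Any]:
    """HTTP readiness probe, as asserted by test_make_readiness_probe."""
    return {
        "httpGet": {"path": "/api/health", "port": port},
        "initialDelaySeconds": 10,
        "periodSeconds": 10,
        "timeoutSeconds": 5,
        "successThreshold": 1,
        "failureThreshold": 3,
    }


if __name__ == "__main__":
    state = {"mysql_host": "mysql", "mysql_port": "3306", "mysql_root_password": "rootpw"}
    assert make_pod_envconfig(state)["DATA_SOURCE_NAME"] == "root:rootpw@(mysql:3306)/"
    assert make_pod_ports(9104)[0]["containerPort"] == 9104
```
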
diff --git a/installers/charm/mysqld-exporter/tox.ini b/installers/charm/mysqld-exporter/tox.ini
deleted file mode 100644
index 4c7970df..00000000
--- a/installers/charm/mysqld-exporter/tox.ini
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-#######################################################################################
-
-[tox]
-envlist = black, cover, flake8, pylint, yamllint, safety
-skipsdist = true
-
-[tox:jenkins]
-toxworkdir = /tmp/.tox
-
-[testenv]
-basepython = python3.8
-setenv = VIRTUAL_ENV={envdir}
- PYTHONDONTWRITEBYTECODE = 1
-deps = -r{toxinidir}/requirements.txt
-
-
-#######################################################################################
-[testenv:black]
-deps = black
-commands =
- black --check --diff src/ tests/
-
-
-#######################################################################################
-[testenv:cover]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- coverage
- nose2
-commands =
- sh -c 'rm -f nosetests.xml'
- coverage erase
- nose2 -C --coverage src
- coverage report --omit='*tests*'
- coverage html -d ./cover --omit='*tests*'
- coverage xml -o coverage.xml --omit=*tests*
-whitelist_externals = sh
-
-
-#######################################################################################
-[testenv:flake8]
-deps = flake8
- flake8-import-order
-commands =
- flake8 src/ tests/
-
-
-#######################################################################################
-[testenv:pylint]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- pylint==2.10.2
-commands =
- pylint -E src/ tests/
-
-
-#######################################################################################
-[testenv:safety]
-setenv =
- LC_ALL=C.UTF-8
- LANG=C.UTF-8
-deps = {[testenv]deps}
- safety
-commands =
- - safety check --full-report
-
-
-#######################################################################################
-[testenv:yamllint]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- yamllint
-commands = yamllint .
-
-#######################################################################################
-[testenv:build]
-passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
-whitelist_externals =
- charmcraft
- sh
-commands =
- charmcraft pack
- sh -c 'ubuntu_version=20.04; \
- architectures="amd64-aarch64-arm64"; \
- charm_name=`cat metadata.yaml | grep -E "^name: " | cut -f 2 -d " "`; \
- mv $charm_name"_ubuntu-"$ubuntu_version-$architectures.charm $charm_name.charm'
-
-#######################################################################################
-[flake8]
-ignore =
- W291,
- W293,
- W503,
- E123,
- E125,
- E226,
- E241,
-exclude =
- .git,
- __pycache__,
- .tox,
-max-line-length = 120
-show-source = True
-builtins = _
-max-complexity = 10
-import-order-style = google
diff --git a/installers/charm/osm-keystone/.gitignore b/installers/charm/osm-keystone/.gitignore
deleted file mode 100644
index 87d0a587..00000000
--- a/installers/charm/osm-keystone/.gitignore
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-venv/
-build/
-*.charm
-.tox/
-.coverage
-coverage.xml
-__pycache__/
-*.py[cod]
-.vscode
\ No newline at end of file
diff --git a/installers/charm/osm-keystone/.jujuignore b/installers/charm/osm-keystone/.jujuignore
deleted file mode 100644
index 17c7a8bb..00000000
--- a/installers/charm/osm-keystone/.jujuignore
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-/venv
-*.py[cod]
-*.charm
diff --git a/installers/charm/osm-keystone/CONTRIBUTING.md b/installers/charm/osm-keystone/CONTRIBUTING.md
deleted file mode 100644
index 3d86cf8e..00000000
--- a/installers/charm/osm-keystone/CONTRIBUTING.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-# Contributing
-
-## Overview
-
-This document explains the processes and practices recommended for contributing enhancements to
-the Keystone charm.
-
-- If you would like to chat with us about your use-cases or proposed implementation, you can reach
- us at [OSM public channel](https://opensourcemano.slack.com/archives/CA2TLA48Y)
-- Familiarising yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) library
- will help you a lot when working on new features or bug fixes.
-- All enhancements require review before being merged. Code review typically examines
- - code quality
- - test coverage
- - user experience for Juju administrators of this charm.
-- Please help us out in ensuring easy-to-review branches by rebasing your Gerrit patch onto
- the `master` branch.
-
-## Developing
-
-You can use the environments created by `tox` for development:
-
-```shell
-tox --notest -e unit
-source .tox/unit/bin/activate
-```
-
-### Testing
-
-```shell
-tox -e fmt # update your code according to linting rules
-tox -e lint # code style
-tox -e unit # unit tests
-# tox -e integration # integration tests
-tox # runs 'lint' and 'unit' environments
-```
-
-## Build charm
-
-Build the charm in this git repository using:
-
-```shell
-charmcraft pack
-```
-
-### Deploy
-
-```bash
-# Create a model
-juju add-model test-keystone
-# Enable DEBUG logging
-juju model-config logging-config="=INFO;unit=DEBUG"
-# Deploy the charm
-juju deploy ./keystone_ubuntu-22.04-amd64.charm \
- --resource keystone-image=opensourcemano/keystone:testing-daily --series jammy
-```
diff --git a/installers/charm/osm-keystone/LICENSE b/installers/charm/osm-keystone/LICENSE
deleted file mode 100644
index d6456956..00000000
--- a/installers/charm/osm-keystone/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/installers/charm/osm-keystone/README.md b/installers/charm/osm-keystone/README.md
deleted file mode 100644
index 08761b9f..00000000
--- a/installers/charm/osm-keystone/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-# Keystone Operator
-
-[Black code style](https://github.com/psf/black/tree/main)
-
-[osm-keystone on Charmhub](https://charmhub.io/osm-keystone)
-
-## Description
-
-This charm deploys Keystone in K8s. It is mainly developed to be used as part of the OSM deployment.
-
-## Usage
-
-The Keystone Operator may be deployed using the Juju command line as follows:
-
-```shell
-$ juju add-model keystone
-$ juju deploy charmed-osm-mariadb-k8s db
-$ juju deploy osm-keystone --trust
-$ juju relate osm-keystone db
-```
-
-## OCI Images
-
-- [keystone](https://hub.docker.com/r/opensourcemano/keystone)
-
-## Contributing
-
-Please see the [Juju SDK docs](https://juju.is/docs/sdk) for guidelines
-on enhancements to this charm following best practice guidelines, and
-`CONTRIBUTING.md` for developer guidance.
diff --git a/installers/charm/osm-keystone/actions.yaml b/installers/charm/osm-keystone/actions.yaml
deleted file mode 100644
index 85ed7e6e..00000000
--- a/installers/charm/osm-keystone/actions.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-
-db-sync:
- description: Execute `keystone-manage db_sync` in the workload container.
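- # Illustrative invocation (Juju 2.9 CLI syntax, hypothetical unit name): juju run-action osm-keystone/0 db-sync --wait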
diff --git a/installers/charm/osm-keystone/charmcraft.yaml b/installers/charm/osm-keystone/charmcraft.yaml
deleted file mode 100644
index c8374f30..00000000
--- a/installers/charm/osm-keystone/charmcraft.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-
-type: "charm"
-bases:
- - build-on:
- - name: "ubuntu"
- channel: "22.04"
- run-on:
- - name: "ubuntu"
- channel: "22.04"
-parts:
- charm:
- build-packages:
- - git
diff --git a/installers/charm/osm-keystone/config.yaml b/installers/charm/osm-keystone/config.yaml
deleted file mode 100644
index 7312bb4d..00000000
--- a/installers/charm/osm-keystone/config.yaml
+++ /dev/null
@@ -1,221 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-
-options:
- region-id:
- type: string
- description: Region ID to be created when starting the service
- default: RegionOne
- keystone-db-password:
- type: string
- description: Keystone DB Password
- default: admin
- admin-username:
- type: string
- description: Admin username to be created when starting the service
- default: admin
- admin-password:
- type: string
- description: Admin password to be created when starting the service
- default: admin
- admin-project:
- type: string
- description: Admin project to be created when starting the service
- default: admin
- service-username:
- type: string
- description: Service Username to be created when starting the service
- default: nbi
- service-password:
- type: string
- description: Service Password to be created when starting the service
- default: nbi
- service-project:
- type: string
- description: Service Project to be created when starting the service
- default: service
- user-domain-name:
- type: string
- description: User domain name (Hardcoded in the container start.sh script)
- default: default
- project-domain-name:
- type: string
- description: |
- Project domain name (Hardcoded in the container start.sh script)
- default: default
- token-expiration:
- type: int
- description: Token keys expiration in seconds
- default: 3600
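- # The charm also derives the fernet key rotation period from this value:
- # token-expiration // (FERNET_MAX_ACTIVE_KEYS - 2), i.e. roughly hourly with the defaults.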
- ldap-enabled:
- type: boolean
- description: Boolean to enable/disable LDAP authentication
- default: false
- ldap-authentication-domain-name:
- type: string
- description: Name of the domain which use LDAP authentication
- default: ""
- ldap-url:
- type: string
- description: URL of the LDAP server
- default: "ldap://localhost"
- ldap-bind-user:
- type: string
- description: User to bind and search for users
- default: ""
- ldap-bind-password:
- type: string
- description: Password to bind and search for users
- default: ""
- ldap-chase-referrals:
- type: string
- description: |
- Sets keystone's referral chasing behavior across directory partitions.
- If left unset, the system's default behavior will be used.
- default: ""
- ldap-page-size:
- type: int
- description: |
- Defines the maximum number of results per page that keystone should
- request from the LDAP server when listing objects. A value of zero (0)
- disables paging.
- default: 0
- ldap-user-tree-dn:
- type: string
- description: |
- Root of the tree in LDAP server in which Keystone will search for users
- default: ""
- ldap-user-objectclass:
- type: string
- description: |
- LDAP object class that Keystone will filter on within user_tree_dn to
- find user objects. Any objects of other classes will be ignored.
- default: inetOrgPerson
- ldap-user-id-attribute:
- type: string
- description: |
- This set of options define the mapping to LDAP attributes for the three
- key user attributes supported by Keystone. The LDAP attribute chosen for
- user_id must be something that is immutable for a user and no more than
- 64 characters in length. Notice that Distinguished Name (DN) may be
- longer than 64 characters and thus is not suitable. A uid or mail
- attribute may be appropriate.
- default: cn
- ldap-user-name-attribute:
- type: string
- description: |
- This set of options define the mapping to LDAP attributes for the three
- key user attributes supported by Keystone. The LDAP attribute chosen for
- user_id must be something that is immutable for a user and no more than
- 64 characters in length. Notice that Distinguished Name (DN) may be
- longer than 64 characters and thus is not suitable. A uid or mail
- attribute may be appropriate.
- default: sn
- ldap-user-pass-attribute:
- type: string
- description: |
- This set of options define the mapping to LDAP attributes for the three
- key user attributes supported by Keystone. The LDAP attribute chosen for
- user_id must be something that is immutable for a user and no more than
- 64 characters in length. Notice that Distinguished Name (DN) may be
- longer than 64 characters and thus is not suitable. A uid or mail
- attribute may be appropriate.
- default: userPassword
- ldap-user-filter:
- type: string
- description: |
- This option allows an additional filter (over and above
- user_objectclass) to be included in the user search. One common use
- of this is to provide more efficient searching, where the recommended
- search for user objects is (&(objectCategory=person)(objectClass=user)).
- By specifying user_objectclass as user and user_filter as
- objectCategory=person in the Keystone configuration file, this can be
- achieved.
- default: ""
- ldap-user-enabled-attribute:
- type: string
- description: |
- In Keystone, a user entity can be either enabled or disabled. Setting
- the above option will give a mapping to an equivalent attribute in LDAP,
- allowing your LDAP management tools to disable a user.
- default: enabled
- ldap-user-enabled-mask:
- type: int
- description: |
- Some LDAP schemas, rather than having a dedicated attribute for user
- enablement, use a bit within a general control attribute (such as
- userAccountControl) to indicate this. Setting user_enabled_mask will
- cause Keystone to look at only the status of this bit in the attribute
- specified by user_enabled_attribute, with the bit set indicating the
- user is enabled.
- default: 0
- ldap-user-enabled-default:
- type: string
- description: |
- Most LDAP servers use a boolean or bit in a control field to indicate
- enablement. However, some schemas might use an integer value in an
- attribute. In this situation, set user_enabled_default to the integer
- value that represents a user being enabled.
- default: "true"
- ldap-user-enabled-invert:
- type: boolean
- description: |
- Some LDAP schemas have an "account locked" attribute, which is the
- equivalent of the account being "disabled". In order to map this to the
- Keystone enabled attribute, you can utilize the user_enabled_invert
- setting in conjunction with user_enabled_attribute to map the lock
- status to disabled in Keystone.
- default: false
- ldap-group-objectclass:
- type: string
- description: The LDAP object class to use for groups.
- default: groupOfNames
- ldap-group-tree-dn:
- type: string
- description: The search base to use for groups.
- default: ""
- ldap-use-starttls:
- type: boolean
- description: |
- Enable Transport Layer Security (TLS) for providing a secure connection
- from Keystone to LDAP (StartTLS, not LDAPS).
- default: false
- ldap-tls-cacert-base64:
- type: string
- description: |
- CA certificate in Base64 format (if you have the PEM file, text inside
- "-----BEGIN CERTIFICATE-----"/"-----END CERTIFICATE-----" tags).
- default: ""
- ldap-tls-req-cert:
- type: string
- description: |
- Defines how the certificates are checked for validity in the client
- (i.e., Keystone end) of the secure connection (this doesn't affect what
- level of checking the server is doing on the certificates it receives
- from Keystone). Possible values are "demand", "never", and "allow". The
- default of "demand" means the client always checks the certificate and
- will drop the connection if it is not provided or invalid. "never" is the
- opposite: it never checks it, nor requires it to be provided. "allow" means
- that if it is not provided then the connection is allowed to continue,
- but if it is provided it will be checked and, if invalid, the connection
- will be dropped.
- default: demand
- mysql-uri:
- type: string
- description: |
- Mysql URI with the following format:
- mysql://<user>:<password>@<host>:<port>/<database>
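- Example (illustrative credentials and host): mysql://root:osm4u@mariadb-k8s:3306/keystone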
diff --git a/installers/charm/osm-keystone/lib/charms/observability_libs/v0/kubernetes_service_patch.py b/installers/charm/osm-keystone/lib/charms/observability_libs/v0/kubernetes_service_patch.py
deleted file mode 100644
index 39b364b1..00000000
--- a/installers/charm/osm-keystone/lib/charms/observability_libs/v0/kubernetes_service_patch.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-"""# KubernetesServicePatch Library.
-
-This library is designed to enable developers to more simply patch the Kubernetes Service created
-by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a
-service named after the application in the namespace (named after the Juju model). This service by
-default contains a "placeholder" port, which is 65535/TCP.
-
-When modifying the default set of resources managed by Juju, one must consider the lifecycle of the
-charm. In this case, any modifications to the default service (created during deployment), will
-be overwritten during a charm upgrade.
-
-When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm`
-events which applies the patch to the cluster. This should ensure that the service ports are
-correct throughout the charm's life.
-
-The constructor simply takes a reference to the parent charm, and a list of tuples that each define
-a port for the service, where each tuple contains:
-
-- a name for the port
-- port for the service to listen on
-- optionally: a targetPort for the service (the port in the container!)
-- optionally: a nodePort for the service (for NodePort or LoadBalancer services only!)
-- optionally: a name of the service (in case service name needs to be patched as well)
-
-## Getting Started
-
-To get started using the library, you just need to fetch the library using `charmcraft`. **Note
-that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.**
-
-```shell
-cd some-charm
-charmcraft fetch-lib charms.observability_libs.v0.kubernetes_service_patch
-cat <<-EOF >> requirements.txt
-lightkube
-lightkube-models
-EOF
-```
-
-Then, to initialise the library:
-
-For ClusterIP services:
-```python
-# ...
-from charms.observability_libs.v0.kubernetes_service_patch import KubernetesServicePatch
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- self.service_patcher = KubernetesServicePatch(self, [(f"{self.app.name}", 8080)])
- # ...
-```
-
-For LoadBalancer/NodePort services:
-```python
-# ...
-from charms.observability_libs.v0.kubernetes_service_patch import KubernetesServicePatch
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- self.service_patcher = KubernetesServicePatch(
- self, [(f"{self.app.name}", 443, 443, 30666)], "LoadBalancer"
- )
- # ...
-```
-
-Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library
-does not try to make any API calls, or open any files during testing that are unlikely to be
-present, and could break your tests. The easiest way to do this is during your test `setUp`:
-
-```python
-# ...
-
-@patch("charm.KubernetesServicePatch", lambda x, y: None)
-def setUp(self, *unused):
- self.harness = Harness(SomeCharm)
- # ...
-```
-"""
-
-import logging
-from types import MethodType
-from typing import Literal, Sequence, Tuple, Union
-
-from lightkube import ApiError, Client
-from lightkube.models.core_v1 import ServicePort, ServiceSpec
-from lightkube.models.meta_v1 import ObjectMeta
-from lightkube.resources.core_v1 import Service
-from lightkube.types import PatchType
-from ops.charm import CharmBase
-from ops.framework import Object
-
-logger = logging.getLogger(__name__)
-
-# The unique Charmhub library identifier, never change it
-LIBID = "0042f86d0a874435adef581806cddbbb"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 5
-
-PortDefinition = Union[Tuple[str, int], Tuple[str, int, int], Tuple[str, int, int, int]]
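-# Illustrative PortDefinition values (hypothetical names/ports): ("keystone", 5000),
-# ("https", 443, 8443), or ("https", 443, 8443, 30666), i.e. (name, port[, targetPort[, nodePort]]).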
-ServiceType = Literal["ClusterIP", "LoadBalancer"]
-
-
-class KubernetesServicePatch(Object):
- """A utility for patching the Kubernetes service set up by Juju."""
-
- def __init__(
- self,
- charm: CharmBase,
- ports: Sequence[PortDefinition],
- service_name: str = None,
- service_type: ServiceType = "ClusterIP",
- ):
- """Constructor for KubernetesServicePatch.
-
- Args:
- charm: the charm that is instantiating the library.
- ports: a list of tuples (name, port, targetPort, nodePort) for every service port.
- service_name: allows setting custom name to the patched service. If none given,
- application name will be used.
- service_type: desired type of K8s service. Default value is in line with ServiceSpec's
- default value.
- """
- super().__init__(charm, "kubernetes-service-patch")
- self.charm = charm
- self.service_name = service_name if service_name else self._app
- self.service = self._service_object(ports, service_name, service_type)
-
- # Make mypy type checking happy that self._patch is a method
- assert isinstance(self._patch, MethodType)
- # Ensure this patch is applied during the 'install' and 'upgrade-charm' events
- self.framework.observe(charm.on.install, self._patch)
- self.framework.observe(charm.on.upgrade_charm, self._patch)
-
- def _service_object(
- self,
- ports: Sequence[PortDefinition],
- service_name: str = None,
- service_type: ServiceType = "ClusterIP",
- ) -> Service:
- """Creates a valid Service representation for Alertmanager.
-
- Args:
- ports: a list of tuples of the form (name, port) or (name, port, targetPort)
- or (name, port, targetPort, nodePort) for every service port. If the 'targetPort'
- is omitted, it is assumed to be equal to 'port', with the exception of NodePort
- and LoadBalancer services, where all port numbers have to be specified.
- service_name: allows setting custom name to the patched service. If none given,
- application name will be used.
- service_type: desired type of K8s service. Default value is in line with ServiceSpec's
- default value.
-
- Returns:
- Service: A valid representation of a Kubernetes Service with the correct ports.
- """
- if not service_name:
- service_name = self._app
- return Service(
- apiVersion="v1",
- kind="Service",
- metadata=ObjectMeta(
- namespace=self._namespace,
- name=service_name,
- labels={"app.kubernetes.io/name": service_name},
- ),
- spec=ServiceSpec(
- selector={"app.kubernetes.io/name": service_name},
- ports=[
- ServicePort(
- name=p[0],
- port=p[1],
- targetPort=p[2] if len(p) > 2 else p[1], # type: ignore[misc]
- nodePort=p[3] if len(p) > 3 else None, # type: ignore[arg-type, misc]
- )
- for p in ports
- ],
- type=service_type,
- ),
- )
-
- def _patch(self, _) -> None:
- """Patch the Kubernetes service created by Juju to map the correct port.
-
- Raises:
- PatchFailed: if patching fails due to lack of permissions, or otherwise.
- """
- if not self.charm.unit.is_leader():
- return
-
- client = Client()
- try:
- client.patch(Service, self._app, self.service, patch_type=PatchType.MERGE)
- except ApiError as e:
- if e.status.code == 403:
- logger.error("Kubernetes service patch failed: `juju trust` this application.")
- else:
- logger.error("Kubernetes service patch failed: %s", str(e))
- else:
- logger.info("Kubernetes service '%s' patched successfully", self._app)
-
- def is_patched(self) -> bool:
- """Reports if the service patch has been applied.
-
- Returns:
- bool: A boolean indicating if the service patch has been applied.
- """
- client = Client()
- # Get the relevant service from the cluster
- service = client.get(Service, name=self.service_name, namespace=self._namespace)
- # Construct a list of expected ports, should the patch be applied
- expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]
- # Construct a list in the same manner, using the fetched service
- fetched_ports = [(p.port, p.targetPort) for p in service.spec.ports] # type: ignore[attr-defined] # noqa: E501
- return expected_ports == fetched_ports
-
- @property
- def _app(self) -> str:
- """Name of the current Juju application.
-
- Returns:
- str: A string containing the name of the current Juju application.
- """
- return self.charm.app.name
-
- @property
- def _namespace(self) -> str:
- """The Kubernetes namespace we're running in.
-
- Returns:
- str: A string containing the name of the current Kubernetes namespace.
- """
- with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
- return f.read().strip()
diff --git a/installers/charm/osm-keystone/metadata.yaml b/installers/charm/osm-keystone/metadata.yaml
deleted file mode 100644
index 61a412ba..00000000
--- a/installers/charm/osm-keystone/metadata.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-name: osm-keystone
-display-name: Keystone
-description: |
- Keystone operator used for Charmed OSM
-
-summary: |
- Keystone operator used for Charmed OSM
-
-containers:
- keystone:
- resource: keystone-image
-
-resources:
- keystone-image:
- type: oci-image
- description: OCI image for Keystone
- upstream-source: opensourcemano/keystone:testing-daily
-
-requires:
- db:
- interface: mysql
- limit: 1
-
-peers:
- cluster:
- interface: cluster
-
-provides:
- keystone:
- interface: keystone
diff --git a/installers/charm/osm-keystone/pyproject.toml b/installers/charm/osm-keystone/pyproject.toml
deleted file mode 100644
index af62f24a..00000000
--- a/installers/charm/osm-keystone/pyproject.toml
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-# Testing tools configuration
-[tool.coverage.run]
-branch = true
-
-[tool.coverage.report]
-show_missing = true
-
-[tool.pytest.ini_options]
-minversion = "6.0"
-log_cli_level = "INFO"
-
-# Formatting tools configuration
-[tool.black]
-line-length = 99
-target-version = ["py38"]
-
-[tool.isort]
-profile = "black"
-
-# Linting tools configuration
-[tool.flake8]
-max-line-length = 99
-max-doc-length = 99
-max-complexity = 10
-exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
-select = ["E", "W", "F", "C", "N", "R", "D", "H"]
-# Ignore W503, E501 because using black creates errors with this
-# Ignore D107 Missing docstring in __init__
-ignore = ["W503", "E501", "D107"]
-# D100, D101, D102, D103: Ignore missing docstrings in tests
-per-file-ignores = ["tests/*:D100,D101,D102,D103,D104"]
-docstring-convention = "google"
-# Check for properly formatted copyright header in each file
-copyright-check = "True"
-copyright-author = "Canonical Ltd."
-copyright-regexp = "Copyright\\s\\d{4}([-,]\\d{4})*\\s+%(author)s"
-
-[tool.bandit]
-tests = ["B201", "B301"]
diff --git a/installers/charm/osm-keystone/requirements.txt b/installers/charm/osm-keystone/requirements.txt
deleted file mode 100644
index 4284431b..00000000
--- a/installers/charm/osm-keystone/requirements.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-ops < 2.2
-git+https://github.com/charmed-osm/config-validator/
-lightkube
-lightkube-models
\ No newline at end of file
diff --git a/installers/charm/osm-keystone/src/charm.py b/installers/charm/osm-keystone/src/charm.py
deleted file mode 100755
index c368ade3..00000000
--- a/installers/charm/osm-keystone/src/charm.py
+++ /dev/null
@@ -1,443 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Actions tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-"""Keystone charm module."""
-
-import logging
-from datetime import datetime
-
-from charms.observability_libs.v0.kubernetes_service_patch import KubernetesServicePatch
-from config_validator import ValidationError
-from ops import pebble
-from ops.charm import ActionEvent, CharmBase, ConfigChangedEvent, UpdateStatusEvent
-from ops.main import main
-from ops.model import ActiveStatus, BlockedStatus, Container, MaintenanceStatus
-
-import cluster
-from config import ConfigModel, MysqlConnectionData, get_environment, validate_config
-from interfaces import KeystoneServer, MysqlClient
-
-logger = logging.getLogger(__name__)
-
-
-# We expect the keystone container to use the default port
-PORT = 5000
-
-KEY_SETUP_FILE = "/etc/keystone/key-setup"
-CREDENTIAL_KEY_REPOSITORY = "/etc/keystone/credential-keys/"
-FERNET_KEY_REPOSITORY = "/etc/keystone/fernet-keys/"
-KEYSTONE_USER = "keystone"
-KEYSTONE_GROUP = "keystone"
-FERNET_MAX_ACTIVE_KEYS = 3
-KEYSTONE_FOLDER = "/etc/keystone/"
-
-
-class CharmError(Exception):
- """Charm error exception."""
-
-
-class KeystoneCharm(CharmBase):
- """Keystone Charm operator."""
-
- on = cluster.ClusterEvents()
-
- def __init__(self, *args) -> None:
- super().__init__(*args)
- event_observe_mapping = {
- self.on.keystone_pebble_ready: self._on_config_changed,
- self.on.config_changed: self._on_config_changed,
- self.on.update_status: self._on_update_status,
- self.on.cluster_keys_changed: self._on_cluster_keys_changed,
- self.on["keystone"].relation_joined: self._publish_keystone_info,
- self.on["db"].relation_changed: self._on_config_changed,
- self.on["db"].relation_broken: self._on_config_changed,
- self.on["db-sync"].action: self._on_db_sync_action,
- }
- for event, observer in event_observe_mapping.items():
- self.framework.observe(event, observer)
- self.cluster = cluster.Cluster(self)
- self.mysql_client = MysqlClient(self, relation_name="db")
- self.keystone = KeystoneServer(self, relation_name="keystone")
- self.service_patch = KubernetesServicePatch(self, [(f"{self.app.name}", PORT)])
-
- @property
- def container(self) -> Container:
- """Property to get keystone container."""
- return self.unit.get_container("keystone")
-
- def _on_db_sync_action(self, event: ActionEvent):
- process = self.container.exec(["keystone-manage", "db_sync"])
- try:
- process.wait()
- event.set_results({"output": "db-sync was successfully executed."})
- except pebble.ExecError as e:
- error_message = f"db-sync action failed with code {e.exit_code} and stderr {e.stderr}."
- logger.error(error_message)
- event.fail(error_message)
-
- def _publish_keystone_info(self, _):
- """Handler for keystone-relation-joined."""
- if self.unit.is_leader():
- config = ConfigModel(**dict(self.config))
- self.keystone.publish_info(
- host=f"http://{self.app.name}:{PORT}/v3",
- port=PORT,
- user_domain_name=config.user_domain_name,
- project_domain_name=config.project_domain_name,
- username=config.service_username,
- password=config.service_password,
- service=config.service_project,
- keystone_db_password=config.keystone_db_password,
- region_id=config.region_id,
- admin_username=config.admin_username,
- admin_password=config.admin_password,
- admin_project_name=config.admin_project,
- )
-
- def _on_config_changed(self, _: ConfigChangedEvent) -> None:
- """Handler for config-changed event."""
- if self.container.can_connect():
- try:
- self._handle_fernet_key_rotation()
- self._safe_restart()
- self.unit.status = ActiveStatus()
- except CharmError as e:
- self.unit.status = BlockedStatus(str(e))
- except ValidationError as e:
- self.unit.status = BlockedStatus(str(e))
- else:
- logger.info("pebble socket not available, deferring config-changed")
- self.unit.status = MaintenanceStatus("waiting for pebble to start")
-
- def _on_update_status(self, event: UpdateStatusEvent) -> None:
- """Handler for update-status event."""
- if self.container.can_connect():
- self._handle_fernet_key_rotation()
- else:
- logger.info("pebble socket not available, deferring config-changed")
- event.defer()
- self.unit.status = MaintenanceStatus("waiting for pebble to start")
-
- def _on_cluster_keys_changed(self, _) -> None:
- """Handler for ClusterKeysChanged event."""
- self._handle_fernet_key_rotation()
-
- def _handle_fernet_key_rotation(self) -> None:
- """Handles fernet key rotation.
-
- First, the function writes the existing keys in the relation to disk.
- Then, if the unit is the leader, it checks whether the keys should be
- rotated.
- """
- self._key_write()
- if self.unit.is_leader():
- if not self.cluster.get_keys():
- self._key_setup()
- self._fernet_keys_rotate_and_sync()
-
- def _key_write(self) -> None:
- """Write keys to container from the relation data."""
- if self.unit.is_leader():
- return
- keys = self.cluster.get_keys()
- if not keys:
- logger.debug('"key_repository" not in relation data yet...')
- return
-
- for key_repository in [FERNET_KEY_REPOSITORY, CREDENTIAL_KEY_REPOSITORY]:
- self._create_keys_folders()
- for key_number, key in keys[key_repository].items():
- logger.debug(f"writing key {key_number} in {key_repository}")
- file_path = f"{key_repository}{key_number}"
- if self._file_changed(file_path, key):
- self.container.push(
- file_path,
- key,
- user=KEYSTONE_USER,
- group=KEYSTONE_GROUP,
- permissions=0o600,
- )
- self.container.push(KEY_SETUP_FILE, "")
-
- def _file_changed(self, file_path: str, content: str) -> bool:
- """Check if file in container has changed its value.
-
- This function checks if the file exists in the container. If it does,
- then it checks if the content of that file is equal to the content passed to
- this function. If the content is equal, the function returns False, otherwise True.
-
- Args:
- file_path (str): File path in the container.
- content (str): Content of the file.
-
- Returns:
- bool: True if the content of the file has changed, or the file doesn't exist in
- the container. False if the content passed to this function is the same as
- in the container.
- """
- if self._file_exists(file_path):
- old_content = self.container.pull(file_path).read()
- if old_content == content:
- return False
- return True
-
- def _create_keys_folders(self) -> None:
- """Create folders for Key repositories."""
- fernet_key_repository_found = False
- credential_key_repository_found = False
- for file in self.container.list_files(KEYSTONE_FOLDER):
- if file.type == pebble.FileType.DIRECTORY:
- if file.path == CREDENTIAL_KEY_REPOSITORY:
- credential_key_repository_found = True
- if file.path == FERNET_KEY_REPOSITORY:
- fernet_key_repository_found = True
- if not fernet_key_repository_found:
- self.container.make_dir(
- FERNET_KEY_REPOSITORY,
- user="keystone",
- group="keystone",
- permissions=0o700,
- make_parents=True,
- )
- if not credential_key_repository_found:
- self.container.make_dir(
- CREDENTIAL_KEY_REPOSITORY,
- user=KEYSTONE_USER,
- group=KEYSTONE_GROUP,
- permissions=0o700,
- make_parents=True,
- )
-
- def _fernet_keys_rotate_and_sync(self) -> None:
- """Rotate and sync the keys if the unit is the leader and the primary key has expired.
-
- The modification time of the staging key (key with index '0') is used,
- along with the config setting "token-expiration" to determine whether to
- rotate the keys.
-
- The rotation time = token-expiration / (max-active-keys - 2)
- where max-active-keys has a minimum of 3.
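- For example, with the default token-expiration of 3600 seconds and
- FERNET_MAX_ACTIVE_KEYS fixed at 3, the staging key is rotated roughly once per hour.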
- """
- if not self.unit.is_leader():
- return
- try:
- fernet_key_file = self.container.list_files(f"{FERNET_KEY_REPOSITORY}0")[0]
- last_rotation = fernet_key_file.last_modified.timestamp()
- except pebble.APIError:
- logger.warning(
- "Fernet key rotation requested but key repository not " "initialized yet"
- )
- return
-
- config = ConfigModel(**self.config)
- rotation_time = config.token_expiration // (FERNET_MAX_ACTIVE_KEYS - 2)
-
- now = datetime.now().timestamp()
- if last_rotation + rotation_time > now:
- # No rotation to do as not reached rotation time
- logger.debug("No rotation needed")
- self._key_leader_set()
- return
- # now rotate the keys and sync them
- self._fernet_rotate()
- self._key_leader_set()
-
- logger.info("Rotated and started sync of fernet keys")
-
- def _key_leader_set(self) -> None:
- """Read current key sets and update peer relation data.
-
- The keys are read from the `FERNET_KEY_REPOSITORY` and `CREDENTIAL_KEY_REPOSITORY`
- directories. Note that this function will fail if it is called on the unit that is
- not the leader.
- """
- disk_keys = {}
- for key_repository in [FERNET_KEY_REPOSITORY, CREDENTIAL_KEY_REPOSITORY]:
- disk_keys[key_repository] = {}
- for file in self.container.list_files(key_repository):
- key_content = self.container.pull(f"{key_repository}{file.name}").read()
- disk_keys[key_repository][file.name] = key_content
- self.cluster.save_keys(disk_keys)
-
- def _fernet_rotate(self) -> None:
- """Rotate Fernet keys.
-
- To rotate the Fernet tokens, and create a new staging key, it calls (as the
- "keystone" user):
-
- keystone-manage fernet_rotate
-
- Note that we do not rotate the Credential encryption keys.
-
- Note that this does NOT synchronise the keys between the units. This is
- performed in `self._key_leader_set`.
- """
- logger.debug("Rotating Fernet tokens")
- try:
- exec_command = [
- "keystone-manage",
- "fernet_rotate",
- "--keystone-user",
- KEYSTONE_USER,
- "--keystone-group",
- KEYSTONE_GROUP,
- ]
- logger.debug(f'Executing command: {" ".join(exec_command)}')
- self.container.exec(exec_command).wait()
- logger.info("Fernet keys successfully rotated.")
- except pebble.ExecError as e:
- logger.error(f"Fernet Key rotation failed: {e}")
- logger.error("Exited with code %d. Stderr:", e.exit_code)
- for line in e.stderr.splitlines():
- logger.error(" %s", line)
-
- def _key_setup(self) -> None:
- """Initialize Fernet and Credential encryption key repositories.
-
- To setup the key repositories:
-
- keystone-manage fernet_setup
- keystone-manage credential_setup
-
- In addition we migrate any credentials currently stored in database using
- the null key to be encrypted by the new credential key:
-
- keystone-manage credential_migrate
-
- Note that we only want to do this once, so we touch an empty file
- (KEY_SETUP_FILE) to indicate that it has been done.
- """
- if self._file_exists(KEY_SETUP_FILE) or not self.unit.is_leader():
- return
-
- logger.debug("Setting up key repositories for Fernet tokens and Credential encryption.")
- try:
- for command in ["fernet_setup", "credential_setup"]:
- exec_command = [
- "keystone-manage",
- command,
- "--keystone-user",
- KEYSTONE_USER,
- "--keystone-group",
- KEYSTONE_GROUP,
- ]
- logger.debug(f'Executing command: {" ".join(exec_command)}')
- self.container.exec(exec_command).wait()
- self.container.push(KEY_SETUP_FILE, "")
- logger.info("Key repositories initialized successfully.")
- except pebble.ExecError as e:
- logger.error("Failed initializing key repositories.")
- logger.error("Exited with code %d. Stderr:", e.exit_code)
- for line in e.stderr.splitlines():
- logger.error(" %s", line)
-
- def _file_exists(self, path: str) -> bool:
- """Check if a file exists in the container.
-
- Args:
- path (str): Path of the file to be checked.
-
- Returns:
- bool: True if the file exists, else False.
- """
- file_exists = None
- try:
- _ = self.container.pull(path)
- file_exists = True
- except pebble.PathError:
- file_exists = False
- exist_str = "exists" if file_exists else 'doesn"t exist'
- logger.debug(f"File {path} {exist_str}.")
- return file_exists
-
- def _safe_restart(self) -> None:
- """Safely restart the keystone service.
-
- This function (re)starts the keystone service after doing some safety checks,
- like validating the charm configuration, checking the mysql relation is ready.
- """
- validate_config(self.config)
- self._check_mysql_data()
- # Workaround: OS_AUTH_URL is not ready when the entrypoint restarts apache2.
- # The function `self._patch_entrypoint` fixes that.
- self._patch_entrypoint()
- self._replan()
-
- def _patch_entrypoint(self) -> None:
- """Patches the entrypoint of the Keystone service.
-
- The entrypoint that restarts apache2, expects immediate communication to OS_AUTH_URL.
- This does not happen instantly. This function patches the entrypoint to wait until a
- curl to OS_AUTH_URL succeeds.
- """
- installer_script = self.container.pull("/app/start.sh").read()
- wait_until_ready_command = "until $(curl --output /dev/null --silent --head --fail $OS_AUTH_URL); do echo '...'; sleep 5; done"
- self.container.push(
- "/app/start-patched.sh",
- installer_script.replace(
- "source setup_env", f"source setup_env && {wait_until_ready_command}"
- ),
- permissions=0o755,
- )
-
- def _check_mysql_data(self) -> None:
- """Check if the mysql relation is ready.
-
- Raises:
- CharmError: Error raised if the mysql relation is not ready.
- """
- if self.mysql_client.is_missing_data_in_unit() and not self.config.get("mysql-uri"):
- raise CharmError("mysql relation is missing")
-
- def _replan(self) -> None:
- """Replan keystone service.
-
- This function starts the keystone service if it is not running.
- If the service started already, this function will restart the
- service if there are any changes to the layer.
- """
- mysql_data = MysqlConnectionData(
- self.config.get("mysql-uri")
- or f"mysql://root:{self.mysql_client.root_password}@{self.mysql_client.host}:{self.mysql_client.port}/"
- )
- layer = {
- "summary": "keystone layer",
- "description": "pebble config layer for keystone",
- "services": {
- "keystone": {
- "override": "replace",
- "summary": "keystone service",
- "command": "/app/start-patched.sh",
- "startup": "enabled",
- "environment": get_environment(self.app.name, self.config, mysql_data),
- }
- },
- }
- self.container.add_layer("keystone", layer, combine=True)
- self.container.replan()
-
-
-if __name__ == "__main__": # pragma: no cover
- main(KeystoneCharm)
diff --git a/installers/charm/osm-keystone/src/cluster.py b/installers/charm/osm-keystone/src/cluster.py
deleted file mode 100644
index f38adec0..00000000
--- a/installers/charm/osm-keystone/src/cluster.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Actions tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-"""Keystone cluster library.
-
-This library implements the Keystone cluster peer relation used to share Fernet and
-credential keys between units. It was originally published as part of the
-[davigar15-apache-guacd](https://charmhub.io/davigar15-apache-guacd) charm.
-
-A charm using this cluster library should include the following content in its
-metadata.yaml:
-
-```yaml
-# ...
-peers:
- cluster:
- interface: cluster
-# ...
-```
-
-A typical example of including this library might be:
-
-```python
-# ...
-from ops.framework import StoredState
-from charms.keystone.v0 import cluster
-
-class SomeApplication(CharmBase):
- on = cluster.ClusterEvents()
-
- def __init__(self, *args):
- # ...
- self.cluster = cluster.Cluster(self)
- self.framework.observe(self.on.cluster_keys_changed, self._cluster_keys_changed)
- # ...
-
- def _cluster_keys_changed(self, _):
- fernet_keys = self.cluster.fernet_keys
- credential_keys = self.cluster.credential_keys
- # ...
-```
-"""
-
-
-import json
-import logging
-from typing import Any, Dict, List
-
-from ops.charm import CharmEvents
-from ops.framework import EventBase, EventSource, Object
-from ops.model import Relation
-
-# Number of keys might need to be adjusted in the future
-NUMBER_FERNET_KEYS = 2
-NUMBER_CREDENTIAL_KEYS = 2
-
-logger = logging.getLogger(__name__)
-
-
-class ClusterKeysChangedEvent(EventBase):
- """Event to announce a change in the Guacd service."""
-
-
-class ClusterEvents(CharmEvents):
- """Cluster Events."""
-
- cluster_keys_changed = EventSource(ClusterKeysChangedEvent)
-
-
-class Cluster(Object):
- """Peer relation."""
-
- def __init__(self, charm):
- super().__init__(charm, "cluster")
- self.charm = charm
-
- @property
- def fernet_keys(self) -> List[str]:
- """Fernet keys."""
- relation: Relation = self.model.get_relation("cluster")
- application_data = relation.data[self.model.app]
- return json.loads(application_data.get("keys-fernet", "[]"))
-
- @property
- def credential_keys(self) -> List[str]:
- """Credential keys."""
- relation: Relation = self.model.get_relation("cluster")
- application_data = relation.data[self.model.app]
- return json.loads(application_data.get("keys-credential", "[]"))
-
- def save_keys(self, keys: Dict[str, Any]) -> None:
- """Generate fernet and credential keys.
-
- This method will generate new keys and fire the cluster_keys_changed event.
- """
- logger.debug("Saving keys...")
- relation: Relation = self.model.get_relation("cluster")
- data = relation.data[self.model.app]
- current_keys_str = data.get("key_repository", "{}")
- current_keys = json.loads(current_keys_str)
- if current_keys != keys:
- data["key_repository"] = json.dumps(keys)
- self.charm.on.cluster_keys_changed.emit()
- logger.info("Keys saved!")
-
- def get_keys(self) -> Dict[str, Any]:
- """Get keys from the relation.
-
- Returns:
- Dict[str, Any]: Dictionary with the keys.
- """
- relation: Relation = self.model.get_relation("cluster")
- data = relation.data[self.model.app]
- current_keys_str = data.get("key_repository", "{}")
- current_keys = json.loads(current_keys_str)
- return current_keys
diff --git a/installers/charm/osm-keystone/src/config.py b/installers/charm/osm-keystone/src/config.py
deleted file mode 100644
index 803d5646..00000000
--- a/installers/charm/osm-keystone/src/config.py
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Actions tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-"""Module that takes take of the charm configuration."""
-
-import re
-from typing import Any, Dict, Optional
-
-from config_validator import ConfigValidator, ValidationError
-from ops.model import ConfigData
-
-
-class MysqlConnectionData:
- """Mysql Connection Data class."""
-
- _compiled_regex = re.compile(
- r"^mysql\:\/\/{}@{}\/{}?$".format(
- r"(?P[_\w]+):(?P[\w\W]+)",
- r"(?P[\-\.\w]+):(?P\d+)",
- r"(?P[_\w]+)",
- )
- )
-
- def __init__(self, mysql_uri: str):
- match = self._compiled_regex.search(mysql_uri)
- if not match:
- raise ValidationError("mysql_uri is not properly formed")
- mysql_data = match.groupdict()
- self.host = mysql_data.get("host")
- self.port = int(mysql_data.get("port"))
- self.username = mysql_data.get("username")
- self.password = mysql_data.get("password")
- self.database = mysql_data.get("database")
- self.uri = mysql_uri
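-
- # Illustrative parse (hypothetical credentials and host):
- #   data = MysqlConnectionData("mysql://root:osm4u@mariadb-k8s:3306/keystone")
- #   data.host == "mariadb-k8s", data.port == 3306, data.database == "keystone"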
-
-
-def validate_config(config: ConfigData):
- """Validate charm configuration.
-
- Args:
- config (ConfigData): Charm configuration.
-
- Raises:
- config_validator.ValidationError if the validation failed.
- """
- kwargs: Dict[str, Any] = config
- ConfigModel(**kwargs)
- ConfigLdapModel(**kwargs)
-
-
-def get_environment(
- service_name: str, config: ConfigData, mysql_data: MysqlConnectionData
-) -> Dict[str, Any]:
- """Get environment variables.
-
- Args:
- service_name (str): Cluster IP service name.
- config (ConfigData): Charm configuration.
- mysql_data (MysqlConnectionData): Parsed mysql connection data.
-
- Returns:
- Dict[str, Any]: Dictionary with the environment variables for Keystone service.
- """
- kwargs: Dict[str, Any] = config
- config = ConfigModel(**kwargs)
- config_ldap = ConfigLdapModel(**kwargs)
- environment = {
- "DB_HOST": mysql_data.host,
- "DB_PORT": mysql_data.port,
- "ROOT_DB_USER": mysql_data.username,
- "ROOT_DB_PASSWORD": mysql_data.password,
- "REGION_ID": config.region_id,
- "KEYSTONE_HOST": service_name,
- "KEYSTONE_DB_PASSWORD": config.keystone_db_password,
- "ADMIN_USERNAME": config.admin_username,
- "ADMIN_PASSWORD": config.admin_password,
- "ADMIN_PROJECT": config.admin_project,
- "SERVICE_USERNAME": config.service_username,
- "SERVICE_PASSWORD": config.service_password,
- "SERVICE_PROJECT": config.service_project,
- }
- if config_ldap.ldap_enabled:
- environment.update(
- {
- "LDAP_AUTHENTICATION_DOMAIN_NAME": config_ldap.ldap_authentication_domain_name,
- "LDAP_URL": config_ldap.ldap_url,
- "LDAP_PAGE_SIZE": str(config_ldap.ldap_page_size),
- "LDAP_USER_OBJECTCLASS": config_ldap.ldap_user_objectclass,
- "LDAP_USER_ID_ATTRIBUTE": config_ldap.ldap_user_id_attribute,
- "LDAP_USER_NAME_ATTRIBUTE": config_ldap.ldap_user_name_attribute,
- "LDAP_USER_PASS_ATTRIBUTE": config_ldap.ldap_user_pass_attribute,
- "LDAP_USER_ENABLED_MASK": str(config_ldap.ldap_user_enabled_mask),
- "LDAP_USER_ENABLED_DEFAULT": config_ldap.ldap_user_enabled_default,
- "LDAP_USER_ENABLED_INVERT": str(config_ldap.ldap_user_enabled_invert),
- "LDAP_GROUP_OBJECTCLASS": config_ldap.ldap_group_objectclass,
- }
- )
- if config_ldap.ldap_use_starttls:
- environment.update(
- {
- "LDAP_USE_STARTTLS": str(config_ldap.ldap_use_starttls),
- "LDAP_TLS_CACERT_BASE64": config_ldap.ldap_tls_cacert_base64,
- "LDAP_TLS_REQ_CERT": config_ldap.ldap_tls_req_cert,
- }
- )
- optional_ldap_configs = {
- "LDAP_BIND_USER": config_ldap.ldap_bind_user,
- "LDAP_BIND_PASSWORD": config_ldap.ldap_bind_password,
- "LDAP_USER_TREE_DN": config_ldap.ldap_user_tree_dn,
- "LDAP_USER_FILTER": config_ldap.ldap_user_filter,
- "LDAP_USER_ENABLED_ATTRIBUTE": config_ldap.ldap_user_enabled_attribute,
- "LDAP_CHASE_REFERRALS": config_ldap.ldap_chase_referrals,
- "LDAP_GROUP_TREE_DN": config_ldap.ldap_group_tree_dn,
- "LDAP_TLS_CACERT_BASE64": config_ldap.ldap_tls_cacert_base64,
- }
- for env, value in optional_ldap_configs.items():
- if value:
- environment[env] = value
- return environment
-
-
-class ConfigModel(ConfigValidator):
- """Keystone Configuration."""
-
- region_id: str
- keystone_db_password: str
- admin_username: str
- admin_password: str
- admin_project: str
- service_username: str
- service_password: str
- service_project: str
- user_domain_name: str
- project_domain_name: str
- token_expiration: int
- mysql_uri: Optional[str]
-
-
-class ConfigLdapModel(ConfigValidator):
- """LDAP Configuration."""
-
- ldap_enabled: bool
- ldap_authentication_domain_name: Optional[str]
- ldap_url: Optional[str]
- ldap_bind_user: Optional[str]
- ldap_bind_password: Optional[str]
- ldap_chase_referrals: Optional[str]
- ldap_page_size: Optional[int]
- ldap_user_tree_dn: Optional[str]
- ldap_user_objectclass: Optional[str]
- ldap_user_id_attribute: Optional[str]
- ldap_user_name_attribute: Optional[str]
- ldap_user_pass_attribute: Optional[str]
- ldap_user_filter: Optional[str]
- ldap_user_enabled_attribute: Optional[str]
- ldap_user_enabled_mask: Optional[int]
- ldap_user_enabled_default: Optional[str]
- ldap_user_enabled_invert: Optional[bool]
- ldap_group_objectclass: Optional[str]
- ldap_group_tree_dn: Optional[str]
- ldap_use_starttls: Optional[bool]
- ldap_tls_cacert_base64: Optional[str]
- ldap_tls_req_cert: Optional[str]
diff --git a/installers/charm/osm-keystone/src/interfaces.py b/installers/charm/osm-keystone/src/interfaces.py
deleted file mode 100644
index 7b019dd7..00000000
--- a/installers/charm/osm-keystone/src/interfaces.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Actions tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-"""Interfaces used by this charm."""
-
-import ops.charm
-import ops.framework
-import ops.model
-
-
-class BaseRelationClient(ops.framework.Object):
- """Requires side of a Kafka Endpoint."""
-
- def __init__(
- self,
- charm: ops.charm.CharmBase,
- relation_name: str,
- mandatory_fields: list = [],
- ):
- super().__init__(charm, relation_name)
- self.relation_name = relation_name
- self.mandatory_fields = mandatory_fields
- self._update_relation()
-
- def get_data_from_unit(self, key: str):
- """Get data from unit relation data."""
- if not self.relation:
- # Refresh the cached relation. In production the constructor runs on every hook,
- # so this is usually redundant, but in unit tests update_relation_data does not
- # re-run the constructor and the cached relation can be empty without this refresh.
- self._update_relation()
- if self.relation:
- for unit in self.relation.units:
- data = self.relation.data[unit].get(key)
- if data:
- return data
-
- def get_data_from_app(self, key: str):
- """Get data from app relation data."""
- if not self.relation or self.relation.app not in self.relation.data:
- # Refresh the cached relation. In production the constructor runs on every hook,
- # so this is usually redundant, but in unit tests update_relation_data does not
- # re-run the constructor and the cached relation can be empty without this refresh.
- self._update_relation()
- if self.relation and self.relation.app in self.relation.data:
- data = self.relation.data[self.relation.app].get(key)
- if data:
- return data
-
- def is_missing_data_in_unit(self):
- """Check if mandatory fields are present in any of the unit's relation data."""
- return not all([self.get_data_from_unit(field) for field in self.mandatory_fields])
-
- def is_missing_data_in_app(self):
- """Check if mandatory fields are set in relation data."""
- return not all([self.get_data_from_app(field) for field in self.mandatory_fields])
-
- def _update_relation(self):
- self.relation = self.framework.model.get_relation(self.relation_name)
-
-
-class MysqlClient(BaseRelationClient):
- """Requires side of a Mysql Endpoint."""
-
- mandatory_fields = ["host", "port", "user", "password", "root_password"]
-
- def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
- super().__init__(charm, relation_name, self.mandatory_fields)
-
- @property
- def host(self):
- """Host."""
- return self.get_data_from_unit("host")
-
- @property
- def port(self):
- """Port."""
- return self.get_data_from_unit("port")
-
- @property
- def user(self):
- """User."""
- return self.get_data_from_unit("user")
-
- @property
- def password(self):
- """Password."""
- return self.get_data_from_unit("password")
-
- @property
- def root_password(self):
- """Root password."""
- return self.get_data_from_unit("root_password")
-
- @property
- def database(self):
- """Database."""
- return self.get_data_from_unit("database")
-
- def get_root_uri(self, database: str):
- """Get the URI for the mysql connection with the root user credentials.
-
- Args:
- database: Database name
-
- Return:
- A string with the following format:
- mysql://root:<root_password>@<host>:<port>/<database>
- """
- return "mysql://root:{}@{}:{}/{}".format(
- self.root_password, self.host, self.port, database
- )
-
- def get_uri(self):
- """Get the URI for the mysql connection with the standard user credentials.
-
- Return:
- A string with the following format:
- mysql://<user>:<password>@<host>:<port>/<database>
- """
- return "mysql://{}:{}@{}:{}/{}".format(
- self.user, self.password, self.host, self.port, self.database
- )
-
-
-class KeystoneServer(ops.framework.Object):
- """Provides side of a Keystone Endpoint."""
-
- relation_name: str = None
-
- def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
- super().__init__(charm, relation_name)
- self.relation_name = relation_name
-
- def publish_info(
- self,
- host: str,
- port: int,
- user_domain_name: str,
- project_domain_name: str,
- username: str,
- password: str,
- service: str,
- keystone_db_password: str,
- region_id: str,
- admin_username: str,
- admin_password: str,
- admin_project_name: str,
- ):
- """Publish information in Keystone relation."""
- if self.framework.model.unit.is_leader():
- for relation in self.framework.model.relations[self.relation_name]:
- relation_data = relation.data[self.framework.model.app]
- relation_data["host"] = str(host)
- relation_data["port"] = str(port)
- relation_data["user_domain_name"] = str(user_domain_name)
- relation_data["project_domain_name"] = str(project_domain_name)
- relation_data["username"] = str(username)
- relation_data["password"] = str(password)
- relation_data["service"] = str(service)
- relation_data["keystone_db_password"] = str(keystone_db_password)
- relation_data["region_id"] = str(region_id)
- relation_data["admin_username"] = str(admin_username)
- relation_data["admin_password"] = str(admin_password)
- relation_data["admin_project_name"] = str(admin_project_name)
diff --git a/installers/charm/osm-keystone/tests/integration/test_charm.py b/installers/charm/osm-keystone/tests/integration/test_charm.py
deleted file mode 100644
index 7e985427..00000000
--- a/installers/charm/osm-keystone/tests/integration/test_charm.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-
-import logging
-from pathlib import Path
-
-import pytest
-import yaml
-from pytest_operator.plugin import OpsTest
-
-logger = logging.getLogger(__name__)
-
-METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
-
-
-@pytest.mark.abort_on_fail
-async def test_build_and_deploy(ops_test: OpsTest):
- """Build the charm-under-test and deploy it together with related charms.
-
- Assert on the unit status before any relations/configurations take place.
- """
- await ops_test.model.set_config({"update-status-hook-interval": "10s"})
- await ops_test.model.deploy("charmed-osm-mariadb-k8s", application_name="mariadb-k8s")
- # build and deploy charm from local source folder
- charm = await ops_test.build_charm(".")
- resources = {
- "keystone-image": METADATA["resources"]["keystone-image"]["upstream-source"],
- }
- await ops_test.model.deploy(charm, resources=resources, application_name="keystone")
- await ops_test.model.add_relation("keystone:db", "mariadb-k8s:mysql")
- await ops_test.model.wait_for_idle(
- apps=["keystone", "mariadb-k8s"], status="active", timeout=1000
- )
- assert ops_test.model.applications["keystone"].units[0].workload_status == "active"
-
- await ops_test.model.set_config({"update-status-hook-interval": "60m"})
diff --git a/installers/charm/osm-keystone/tests/unit/test_charm.py b/installers/charm/osm-keystone/tests/unit/test_charm.py
deleted file mode 100644
index 7207b63e..00000000
--- a/installers/charm/osm-keystone/tests/unit/test_charm.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-
-import pytest
-from ops import pebble
-from ops.model import ActiveStatus, BlockedStatus
-from ops.testing import Harness
-from pytest_mock import MockerFixture
-
-from charm import FERNET_KEY_REPOSITORY, KEYSTONE_FOLDER, KeystoneCharm
-
-
-@pytest.fixture
-def harness_no_relations(mocker: MockerFixture):
- mocker.patch("charm.cluster")
- mocker.patch("charm.KubernetesServicePatch")
- keystone_harness = Harness(KeystoneCharm)
- keystone_harness.begin()
- container = keystone_harness.charm.unit.get_container("keystone")
- keystone_harness.set_can_connect(container, True)
- container.make_dir(KEYSTONE_FOLDER, make_parents=True)
- container.make_dir(FERNET_KEY_REPOSITORY, make_parents=True)
- container.push(f"{FERNET_KEY_REPOSITORY}0", "token")
- container.make_dir("/app", make_parents=True)
- container.push("/app/start.sh", "")
- container.exec = mocker.Mock()
- yield keystone_harness
- keystone_harness.cleanup()
-
-
-@pytest.fixture
-def harness(harness_no_relations: Harness):
- mysql_rel_id = harness_no_relations.add_relation("db", "mysql")
- harness_no_relations.add_relation_unit(mysql_rel_id, "mysql/0")
- harness_no_relations.update_relation_data(
- mysql_rel_id,
- "mysql/0",
- {
- "host": "host",
- "port": "3306",
- "user": "user",
- "root_password": "root_pass",
- "password": "password",
- "database": "db",
- },
- )
- return harness_no_relations
-
-
-def test_mysql_missing_relation(mocker: MockerFixture, harness_no_relations: Harness):
- spy_safe_restart = mocker.spy(harness_no_relations.charm, "_safe_restart")
- harness_no_relations.charm.on.keystone_pebble_ready.emit("keystone")
- assert harness_no_relations.charm.unit.status == BlockedStatus("mysql relation is missing")
- assert spy_safe_restart.call_count == 1
- harness_no_relations.charm.on.config_changed.emit()
- assert harness_no_relations.charm.unit.status == BlockedStatus("mysql relation is missing")
- assert spy_safe_restart.call_count == 2
-
-
-def test_mysql_relation_ready(mocker: MockerFixture, harness: Harness):
- spy = mocker.spy(harness.charm, "_safe_restart")
- harness.charm.on.config_changed.emit()
- assert harness.charm.unit.status == ActiveStatus()
- assert spy.call_count == 1
-
-
-def test_db_sync_action(mocker: MockerFixture, harness: Harness):
- event_mock = mocker.Mock()
- harness.charm._on_db_sync_action(event_mock)
- event_mock.set_results.assert_called_once_with(
- {"output": "db-sync was successfully executed."}
- )
- event_mock.fail.assert_not_called()
- harness.charm.container.exec().wait.side_effect = pebble.ExecError(
- ["keystone-manage", "db_sync"], 1, "", "Error"
- )
- harness.charm._on_db_sync_action(event_mock)
- event_mock.fail.assert_called_once_with("db-sync action failed with code 1 and stderr Error.")
-
-
-def test_provide_keystone_relation(mocker: MockerFixture, harness: Harness):
- # Non-leader
- mon_rel_id = harness.add_relation("keystone", "mon")
- harness.add_relation_unit(mon_rel_id, "mon/0")
- data = harness.get_relation_data(mon_rel_id, harness.charm.app)
- assert data == {}
- # Leader
- harness.set_leader(True)
- nbi_rel_id = harness.add_relation("keystone", "nbi")
- harness.add_relation_unit(nbi_rel_id, "nbi/0")
- data = harness.get_relation_data(nbi_rel_id, harness.charm.app)
- assert data == {
- "host": "http://osm-keystone:5000/v3",
- "port": "5000",
- "user_domain_name": "default",
- "project_domain_name": "default",
- "username": "nbi",
- "password": "nbi",
- "service": "service",
- "keystone_db_password": "admin",
- "region_id": "RegionOne",
- "admin_username": "admin",
- "admin_password": "admin",
- "admin_project_name": "admin",
- }
-
-
-def test_update_status_rotation(mocker: MockerFixture, harness: Harness):
- spy_fernet_rotate = mocker.spy(harness.charm, "_fernet_rotate")
- harness.set_leader(True)
- harness._update_config({"token-expiration": -1})
- harness.charm.on.update_status.emit()
- assert spy_fernet_rotate.call_count == 1
-
-
-def test_update_status_no_rotation(mocker: MockerFixture, harness: Harness):
- spy_fernet_rotate = mocker.spy(harness.charm, "_fernet_rotate")
- harness.set_leader(True)
- harness._update_config({"token-expiration": 3600})
- harness.charm.on.update_status.emit()
- assert spy_fernet_rotate.call_count == 0
diff --git a/installers/charm/osm-keystone/tox.ini b/installers/charm/osm-keystone/tox.ini
deleted file mode 100644
index d08fe86c..00000000
--- a/installers/charm/osm-keystone/tox.ini
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-[tox]
-skipsdist=True
-skip_missing_interpreters = True
-envlist = lint, unit, analyze, integration
-
-[vars]
-src_path = {toxinidir}/src/
-tst_path = {toxinidir}/tests/
-all_path = {[vars]src_path} {[vars]tst_path}
-
-[testenv]
-basepython = python3.8
-setenv =
- PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path}
- PYTHONBREAKPOINT=ipdb.set_trace
-passenv =
- PYTHONPATH
- HOME
- PATH
- CHARM_BUILD_DIR
- MODEL_SETTINGS
- HTTP_PROXY
- HTTPS_PROXY
- NO_PROXY
-
-[testenv:fmt]
-description = Apply coding style standards to code
-deps =
- black
- isort
-commands =
- isort {[vars]all_path}
- black {[vars]all_path}
-
-[testenv:lint]
-description = Check code against coding style standards
-deps =
- black
- flake8
- flake8-docstrings
- flake8-copyright
- flake8-builtins
- # prospector[with_everything]
- pyproject-flake8
- pep8-naming
- isort
- codespell
- yamllint
-commands =
- codespell {toxinidir}/*.yaml {toxinidir}/*.ini {toxinidir}/*.md \
- {toxinidir}/*.toml {toxinidir}/*.txt {toxinidir}/.github
- # prospector -A -F -T
- yamllint -d '\{extends: default, ignore: "build\n.tox" \}' .
- # pflake8 wrapper supports config from pyproject.toml
- pflake8 {[vars]all_path}
- isort --check-only --diff {[vars]all_path}
- black --check --diff {[vars]all_path}
-
-[testenv:unit]
-description = Run unit tests
-deps =
- pytest
- pytest-mock
- pytest-cov
- coverage[toml]
- -r{toxinidir}/requirements.txt
-commands =
- pytest --ignore={[vars]tst_path}integration --cov={[vars]src_path} --cov-report=xml
- coverage report --omit=tests/*
-
-[testenv:analyze]
-description = Run static analysis
-deps =
- pylint==2.10.2
- -r{toxinidir}/requirements.txt
-commands =
- pylint -E {[vars]src_path}
-
-[testenv:security]
-description = Run security tests
-deps =
- bandit
- safety
-commands =
- bandit -r {[vars]src_path}
- - safety check
-
-[testenv:integration]
-description = Run integration tests
-deps =
- pytest
- juju<3
- pytest-operator
- -r{toxinidir}/requirements.txt
-commands =
- pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} --cloud microk8s
diff --git a/installers/charm/osm-lcm/.gitignore b/installers/charm/osm-lcm/.gitignore
deleted file mode 100644
index 87d0a587..00000000
--- a/installers/charm/osm-lcm/.gitignore
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-venv/
-build/
-*.charm
-.tox/
-.coverage
-coverage.xml
-__pycache__/
-*.py[cod]
-.vscode
\ No newline at end of file
diff --git a/installers/charm/osm-lcm/.jujuignore b/installers/charm/osm-lcm/.jujuignore
deleted file mode 100644
index 17c7a8bb..00000000
--- a/installers/charm/osm-lcm/.jujuignore
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-/venv
-*.py[cod]
-*.charm
diff --git a/installers/charm/osm-lcm/CONTRIBUTING.md b/installers/charm/osm-lcm/CONTRIBUTING.md
deleted file mode 100644
index d4fd8b99..00000000
--- a/installers/charm/osm-lcm/CONTRIBUTING.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-# Contributing
-
-## Overview
-
-This document explains the processes and practices recommended for contributing enhancements to
-this operator.
-
-- Generally, before developing enhancements to this charm, you should consider [opening an issue
- ](https://osm.etsi.org/bugzilla/enter_bug.cgi?product=OSM) explaining your use case. (Component=devops, version=master)
-- If you would like to chat with us about your use-cases or proposed implementation, you can reach
- us at [OSM Juju public channel](https://opensourcemano.slack.com/archives/C027KJGPECA).
-- Familiarising yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) library
- will help you a lot when working on new features or bug fixes.
-- All enhancements require review before being merged. Code review typically examines
- - code quality
- - test coverage
- - user experience for Juju administrators of this charm.
-- Please help us keep branches easy to review by rebasing your Gerrit patch onto
- the `master` branch.
-
-## Developing
-
-You can use the environments created by `tox` for development:
-
-```shell
-tox --notest -e unit
-source .tox/unit/bin/activate
-```
-
-### Testing
-
-```shell
-tox -e fmt # update your code according to linting rules
-tox -e lint # code style
-tox -e unit # unit tests
-tox -e integration # integration tests
-tox # runs 'lint' and 'unit' environments
-```
-
-## Build charm
-
-Build the charm in this git repository using:
-
-```shell
-charmcraft pack
-```
-
-### Deploy
-
-```bash
-# Create a model
-juju add-model dev
-# Enable DEBUG logging
-juju model-config logging-config="=INFO;unit=DEBUG"
-# Deploy the charm
-juju deploy ./osm-lcm_ubuntu-22.04-amd64.charm \
- --resource lcm-image=opensourcemano/lcm:testing-daily --series jammy
-```
diff --git a/installers/charm/osm-lcm/LICENSE b/installers/charm/osm-lcm/LICENSE
deleted file mode 100644
index 7e9d5046..00000000
--- a/installers/charm/osm-lcm/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2022 Canonical Ltd.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/installers/charm/osm-lcm/README.md b/installers/charm/osm-lcm/README.md
deleted file mode 100644
index b9b2f80f..00000000
--- a/installers/charm/osm-lcm/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-
-
-# OSM LCM
-
-Charmhub package name: osm-lcm
-More information: https://charmhub.io/osm-lcm
-
-## Other resources
-
-* [Read more](https://osm.etsi.org/docs/user-guide/latest/)
-
-* [Contributing](https://osm.etsi.org/gitweb/?p=osm/devops.git;a=blob;f=installers/charm/osm-lcm/CONTRIBUTING.md)
-
-* See the [Juju SDK documentation](https://juju.is/docs/sdk) for more information about developing and improving charms.
-
diff --git a/installers/charm/osm-lcm/actions.yaml b/installers/charm/osm-lcm/actions.yaml
deleted file mode 100644
index 0d73468f..00000000
--- a/installers/charm/osm-lcm/actions.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Actions tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-get-debug-mode-information:
- description: Get information to debug the container
diff --git a/installers/charm/osm-lcm/charmcraft.yaml b/installers/charm/osm-lcm/charmcraft.yaml
deleted file mode 100644
index f5e3ff37..00000000
--- a/installers/charm/osm-lcm/charmcraft.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-
-type: charm
-bases:
- - build-on:
- - name: "ubuntu"
- channel: "22.04"
- run-on:
- - name: "ubuntu"
- channel: "22.04"
-
-parts:
- charm:
- # build-packages:
- # - git
- prime:
- - files/*
diff --git a/installers/charm/osm-lcm/config.yaml b/installers/charm/osm-lcm/config.yaml
deleted file mode 100644
index e539f7b1..00000000
--- a/installers/charm/osm-lcm/config.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Configure tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-options:
- log-level:
- default: "INFO"
- description: |
- Set the Logging Level.
-
- Options:
- - TRACE
- - DEBUG
- - INFO
- - WARN
- - ERROR
- - FATAL
- type: string
- database-commonkey:
- description: Database COMMON KEY
- type: string
- default: osm
- # Helm options
- helm-stable-repo-url:
- description: Stable repository URL for Helm charts
- type: string
- default: https://charts.helm.sh/stable
- helm-ca-certs:
- description: CA certificates to validate access to Helm repository
- type: string
- default: ""
- # Debug-mode options
- debug-mode:
- type: boolean
- description: |
- Great for OSM Developers! (Not recommended for production deployments)
-
- This option activates the Debug Mode, which sets up the container to be ready for debugging.
- As part of the setup, SSH is enabled and a VSCode workspace file is automatically populated.
-
- After enabling the debug-mode, execute the following command to get the information you need
- to start debugging:
- `juju run-action get-debug-mode-information --wait`
-
- The previous command returns the command you need to execute, and the SSH password that was set.
-
- See also:
- - https://charmhub.io/osm-lcm/configure#lcm-hostpath
- - https://charmhub.io/osm-lcm/configure#n2vc-hostpath
- - https://charmhub.io/osm-lcm/configure#common-hostpath
- default: false
- lcm-hostpath:
- type: string
- description: |
- Set this config to the local path of the LCM module to persist the changes done during the
- debug-mode session.
-
- Example:
- $ git clone "https://osm.etsi.org/gerrit/osm/LCM" /home/ubuntu/LCM
- $ juju config lcm lcm-hostpath=/home/ubuntu/LCM
-
- This configuration only applies if option `debug-mode` is set to true.
- n2vc-hostpath:
- type: string
- description: |
- Set this config to the local path of the N2VC module to persist the changes done during the
- debug-mode session.
-
- Example:
- $ git clone "https://osm.etsi.org/gerrit/osm/N2VC" /home/ubuntu/N2VC
- $ juju config lcm n2vc-hostpath=/home/ubuntu/N2VC
-
- This configuration only applies if option `debug-mode` is set to true.
- common-hostpath:
- type: string
- description: |
- Set this config to the local path of the common module to persist the changes done during the
- debug-mode session.
-
- Example:
- $ git clone "https://osm.etsi.org/gerrit/osm/common" /home/ubuntu/common
- $ juju config lcm common-hostpath=/home/ubuntu/common
-
- This configuration only applies if option `debug-mode` is set to true.
diff --git a/installers/charm/osm-lcm/files/vscode-workspace.json b/installers/charm/osm-lcm/files/vscode-workspace.json
deleted file mode 100644
index f17b24dd..00000000
--- a/installers/charm/osm-lcm/files/vscode-workspace.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- "folders": [
- {"path": "/usr/lib/python3/dist-packages/osm_lcm"},
- {"path": "/usr/lib/python3/dist-packages/osm_common"},
- {"path": "/usr/lib/python3/dist-packages/n2vc"}
- ],
- "settings": {},
- "launch": {
- "version": "0.2.0",
- "configurations": [
- {
- "name": "LCM",
- "type": "python",
- "request": "launch",
- "module": "osm_lcm.lcm",
- "justMyCode": false,
- }
- ]
- }
-}
\ No newline at end of file
diff --git a/installers/charm/osm-lcm/lib/charms/data_platform_libs/v0/data_interfaces.py b/installers/charm/osm-lcm/lib/charms/data_platform_libs/v0/data_interfaces.py
deleted file mode 100644
index b3da5aa4..00000000
--- a/installers/charm/osm-lcm/lib/charms/data_platform_libs/v0/data_interfaces.py
+++ /dev/null
@@ -1,1130 +0,0 @@
-# Copyright 2023 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Library to manage the relation for the data-platform products.
-
-This library contains the Requires and Provides classes for handling the relation
-between an application and multiple managed applications supported by the data team:
-MySQL, PostgreSQL, MongoDB, Redis, and Kafka.
-
-### Database (MySQL, Postgresql, MongoDB, and Redis)
-
-#### Requires Charm
-This library is a uniform interface to a selection of common database
-metadata, with added custom events that add convenience to database management,
-and methods to consume the application-related data.
-
-
-The following is an example of using the DatabaseCreatedEvent in the context of the
-application charm code:
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- DatabaseCreatedEvent,
- DatabaseRequires,
-)
-
-class ApplicationCharm(CharmBase):
- # Application charm that connects to database charms.
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Charm events defined in the database requires charm library.
- self.database = DatabaseRequires(self, relation_name="database", database_name="database")
- self.framework.observe(self.database.on.database_created, self._on_database_created)
-
- def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
-
- # Start application with rendered configuration
- self._start_application(config_file)
-
- # Set active status
- self.unit.status = ActiveStatus("received database credentials")
-```
-
-As shown above, the library provides some custom events to handle specific situations,
-which are listed below:
-
-- database_created: event emitted when the requested database is created.
-- endpoints_changed: event emitted when the read/write endpoints of the database have changed.
-- read_only_endpoints_changed: event emitted when the read-only endpoints of the database
- have changed. Event is not triggered if read/write endpoints changed too.
-
-If you need to connect multiple database clusters to the same relation endpoint,
-the application charm can implement the same code as if it were connecting to only
-one database cluster (like the code example above).
-
-To differentiate multiple clusters connected to the same relation endpoint
-the application charm can use the name of the remote application:
-
-```python
-
-def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Get the remote app name of the cluster that triggered this event
- cluster = event.relation.app.name
-```
-
-It is also possible to provide an alias for each different database cluster/relation.
-
-So, it is possible to differentiate the clusters in two ways.
-The first is to use the remote application name, i.e., `event.relation.app.name`, as above.
-
-The second way is to use different event handlers to handle each cluster events.
-The implementation would be something like the following code:
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- DatabaseCreatedEvent,
- DatabaseRequires,
-)
-
-class ApplicationCharm(CharmBase):
- # Application charm that connects to database charms.
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Define the cluster aliases and one handler for each cluster database created event.
- self.database = DatabaseRequires(
- self,
- relation_name="database",
- database_name="database",
- relations_aliases = ["cluster1", "cluster2"],
- )
- self.framework.observe(
- self.database.on.cluster1_database_created, self._on_cluster1_database_created
- )
- self.framework.observe(
- self.database.on.cluster2_database_created, self._on_cluster2_database_created
- )
-
- def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database on the cluster named cluster1
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
- ...
-
- def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database on the cluster named cluster2
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
- ...
-
-```
-
-#### Provider Charm
-
-The following is an example of using the DatabaseRequestedEvent in the context of the
-database charm code:
-
-```python
-from charms.data_platform_libs.v0.data_interfaces import DatabaseProvides
-
-class SampleCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- # Charm events defined in the database provides charm library.
- self.provided_database = DatabaseProvides(self, relation_name="database")
- self.framework.observe(self.provided_database.on.database_requested,
- self._on_database_requested)
- # Database generic helper
- self.database = DatabaseHelper()
-
- def _on_database_requested(self, event: DatabaseRequestedEvent) -> None:
- # Handle the event triggered by a new database requested in the relation
- # Retrieve the database name using the charm library.
- db_name = event.database
- # generate a new user credential
- username = self.database.generate_user()
- password = self.database.generate_password()
- # set the credentials for the relation
- self.provided_database.set_credentials(event.relation.id, username, password)
- # set other variables for the relation event.set_tls("False")
-```
-As shown above, the library provides a custom event (database_requested) to handle
-the situation when an application charm requests a new database to be created.
-It is preferred to subscribe to this event instead of the relation-changed event, to avoid
-creating a new database when information other than a database name is
-exchanged in the relation databag.
-
-### Kafka
-
-This library is the interface for interacting with the Kafka charm. It contains
-custom events that add convenience to managing Kafka, and provides methods to consume the
-application-related data.
-
-#### Requirer Charm
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- BootstrapServerChangedEvent,
- KafkaRequires,
- TopicCreatedEvent,
-)
-
-class ApplicationCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- self.kafka = KafkaRequires(self, "kafka_client", "test-topic")
- self.framework.observe(
- self.kafka.on.bootstrap_server_changed, self._on_kafka_bootstrap_server_changed
- )
- self.framework.observe(
- self.kafka.on.topic_created, self._on_kafka_topic_created
- )
-
- def _on_kafka_bootstrap_server_changed(self, event: BootstrapServerChangedEvent):
- # Event triggered when a bootstrap server was changed for this application
-
- new_bootstrap_server = event.bootstrap_server
- ...
-
- def _on_kafka_topic_created(self, event: TopicCreatedEvent):
- # Event triggered when a topic was created for this application
- username = event.username
- password = event.password
- tls = event.tls
- tls_ca= event.tls_ca
- bootstrap_server = event.bootstrap_server
- consumer_group_prefix = event.consumer_group_prefix
- zookeeper_uris = event.zookeeper_uris
- ...
-
-```
-
-As shown above, the library provides some custom events to handle specific situations,
-which are listed below:
-
-- topic_created: event emitted when the requested topic is created.
-- bootstrap_server_changed: event emitted when the bootstrap server has changed.
-- credential_changed: event emitted when the credentials of Kafka changed.
-
-#### Provider Charm
-
-Following the previous example, this is an example of the provider charm.
-
-```python
-from charms.data_platform_libs.v0.data_interfaces import (
- KafkaProvides,
- TopicRequestedEvent,
-)
-
-class SampleCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Default charm events.
- self.framework.observe(self.on.start, self._on_start)
-
- # Charm events defined in the Kafka Provides charm library.
- self.kafka_provider = KafkaProvides(self, relation_name="kafka_client")
- self.framework.observe(self.kafka_provider.on.topic_requested, self._on_topic_requested)
- # Kafka generic helper
- self.kafka = KafkaHelper()
-
- def _on_topic_requested(self, event: TopicRequestedEvent):
- # Handle the on_topic_requested event.
-
- topic = event.topic
- relation_id = event.relation.id
- # set connection info in the databag relation
- self.kafka_provider.set_bootstrap_server(relation_id, self.kafka.get_bootstrap_server())
- self.kafka_provider.set_credentials(relation_id, username=username, password=password)
- self.kafka_provider.set_consumer_group_prefix(relation_id, ...)
- self.kafka_provider.set_tls(relation_id, "False")
- self.kafka_provider.set_zookeeper_uris(relation_id, ...)
-
-```
-As shown above, the library provides a custom event (topic_requested) to handle
-the situation when an application charm requests a new topic to be created.
-It is preferred to subscribe to this event instead of the relation-changed event, to avoid
-creating a new topic when information other than a topic name is
-exchanged in the relation databag.
-"""
-
-import json
-import logging
-from abc import ABC, abstractmethod
-from collections import namedtuple
-from datetime import datetime
-from typing import List, Optional
-
-from ops.charm import (
- CharmBase,
- CharmEvents,
- RelationChangedEvent,
- RelationEvent,
- RelationJoinedEvent,
-)
-from ops.framework import EventSource, Object
-from ops.model import Relation
-
-# The unique Charmhub library identifier, never change it
-LIBID = "6c3e6b6680d64e9c89e611d1a15f65be"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 7
-
-PYDEPS = ["ops>=2.0.0"]
-
-logger = logging.getLogger(__name__)
-
-Diff = namedtuple("Diff", "added changed deleted")
-Diff.__doc__ = """
-A tuple for storing the diff between two data mappings.
-
-added - keys that were added
-changed - keys that still exist but have new values
-deleted - keys that were deleted"""
-
-
-def diff(event: RelationChangedEvent, bucket: str) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
- bucket: bucket of the databag (app or unit)
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- # Retrieve the old data from the data key in the application relation databag.
- old_data = json.loads(event.relation.data[bucket].get("data", "{}"))
- # Retrieve the new data from the event relation databag.
- new_data = {
- key: value for key, value in event.relation.data[event.app].items() if key != "data"
- }
-
- # These are the keys that were added to the databag and triggered this event.
- added = new_data.keys() - old_data.keys()
- # These are the keys that were removed from the databag and triggered this event.
- deleted = old_data.keys() - new_data.keys()
- # These are the keys that already existed in the databag,
- # but had their values changed.
- changed = {key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key]}
- # Convert the new_data to a serializable format and save it for a next diff check.
- event.relation.data[bucket].update({"data": json.dumps(new_data)})
-
- # Return the diff with all possible changes.
- return Diff(added, changed, deleted)
-
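To make the databag arithmetic concrete, here is a small self-contained illustration (not part of the library) of what the returned `Diff` contains for a hypothetical old and new databag; the keys and values are made up.

```python
# Illustrative only: the same set operations diff() performs, applied to plain dicts.
from collections import namedtuple

Diff = namedtuple("Diff", "added changed deleted")  # mirrors the definition above

old_data = {"database": "osm", "extra-user-roles": "admin"}
new_data = {"database": "osm", "extra-user-roles": "ops", "endpoints": "10.0.0.1:3306"}

added = new_data.keys() - old_data.keys()      # keys only present in the new databag
deleted = old_data.keys() - new_data.keys()    # keys that disappeared
changed = {k for k in old_data.keys() & new_data.keys() if old_data[k] != new_data[k]}

print(Diff(added, changed, deleted))
# Diff(added={'endpoints'}, changed={'extra-user-roles'}, deleted=set())
```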
-
-# Base DataProvides and DataRequires
-
-
-class DataProvides(Object, ABC):
- """Base provides-side of the data products relation."""
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
- self.charm = charm
- self.local_app = self.charm.model.app
- self.local_unit = self.charm.unit
- self.relation_name = relation_name
- self.framework.observe(
- charm.on[relation_name].relation_changed,
- self._on_relation_changed,
- )
-
- def _diff(self, event: RelationChangedEvent) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- return diff(event, self.local_app)
-
- @abstractmethod
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation data has changed."""
- raise NotImplementedError
-
- def fetch_relation_data(self) -> dict:
- """Retrieves data from relation.
-
- This function can be used to retrieve data from a relation
- in the charm code when outside an event callback.
-
- Returns:
- a dict of the values stored in the relation data bag
- for all relation instances (indexed by the relation id).
- """
- data = {}
- for relation in self.relations:
- data[relation.id] = {
- key: value for key, value in relation.data[relation.app].items() if key != "data"
- }
- return data
-
- def _update_relation_data(self, relation_id: int, data: dict) -> None:
- """Updates a set of key-value pairs in the relation.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- data: dict containing the key-value pairs
- that should be updated in the relation.
- """
- if self.local_unit.is_leader():
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_app].update(data)
-
- @property
- def relations(self) -> List[Relation]:
- """The list of Relation instances associated with this relation_name."""
- return list(self.charm.model.relations[self.relation_name])
-
- def set_credentials(self, relation_id: int, username: str, password: str) -> None:
- """Set credentials.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- username: user that was created.
- password: password of the created user.
- """
- self._update_relation_data(
- relation_id,
- {
- "username": username,
- "password": password,
- },
- )
-
- def set_tls(self, relation_id: int, tls: str) -> None:
- """Set whether TLS is enabled.
-
- Args:
- relation_id: the identifier for a particular relation.
- tls: whether tls is enabled (True or False).
- """
- self._update_relation_data(relation_id, {"tls": tls})
-
- def set_tls_ca(self, relation_id: int, tls_ca: str) -> None:
- """Set the TLS CA in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- tls_ca: TLS certification authority.
- """
- self._update_relation_data(relation_id, {"tls_ca": tls_ca})
-
-
-class DataRequires(Object, ABC):
- """Requires-side of the relation."""
-
- def __init__(
- self,
- charm,
- relation_name: str,
- extra_user_roles: str = None,
- ):
- """Manager of base client relations."""
- super().__init__(charm, relation_name)
- self.charm = charm
- self.extra_user_roles = extra_user_roles
- self.local_app = self.charm.model.app
- self.local_unit = self.charm.unit
- self.relation_name = relation_name
- self.framework.observe(
- self.charm.on[relation_name].relation_joined, self._on_relation_joined_event
- )
- self.framework.observe(
- self.charm.on[relation_name].relation_changed, self._on_relation_changed_event
- )
-
- @abstractmethod
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the relation."""
- raise NotImplementedError
-
- @abstractmethod
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- raise NotImplementedError
-
- def fetch_relation_data(self) -> dict:
- """Retrieves data from relation.
-
- This function can be used to retrieve data from a relation
- in the charm code when outside an event callback.
- Function cannot be used in `*-relation-broken` events and will raise an exception.
-
- Returns:
- a dict of the values stored in the relation data bag
- for all relation instances (indexed by the relation ID).
- """
- data = {}
- for relation in self.relations:
- data[relation.id] = {
- key: value for key, value in relation.data[relation.app].items() if key != "data"
- }
- return data
-
- def _update_relation_data(self, relation_id: int, data: dict) -> None:
- """Updates a set of key-value pairs in the relation.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- data: dict containing the key-value pairs
- that should be updated in the relation.
- """
- if self.local_unit.is_leader():
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_app].update(data)
-
- def _diff(self, event: RelationChangedEvent) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- return diff(event, self.local_unit)
-
- @property
- def relations(self) -> List[Relation]:
- """The list of Relation instances associated with this relation_name."""
- return [
- relation
- for relation in self.charm.model.relations[self.relation_name]
- if self._is_relation_active(relation)
- ]
-
- @staticmethod
- def _is_relation_active(relation: Relation):
- try:
- _ = repr(relation.data)
- return True
- except RuntimeError:
- return False
-
- @staticmethod
- def _is_resource_created_for_relation(relation: Relation):
- return (
- "username" in relation.data[relation.app] and "password" in relation.data[relation.app]
- )
-
- def is_resource_created(self, relation_id: Optional[int] = None) -> bool:
- """Check if the resource has been created.
-
- This function can be used to check if the Provider answered with data in the charm code
- when outside an event callback.
-
- Args:
- relation_id (int, optional): When provided the check is done only for the relation id
- provided, otherwise the check is done for all relations
-
- Returns:
- True or False
-
- Raises:
- IndexError: If relation_id is provided but that relation does not exist
- """
- if relation_id is not None:
- try:
- relation = [relation for relation in self.relations if relation.id == relation_id][
- 0
- ]
- return self._is_resource_created_for_relation(relation)
- except IndexError:
- raise IndexError(f"relation id {relation_id} cannot be accessed")
- else:
- return (
- all(
- [
- self._is_resource_created_for_relation(relation)
- for relation in self.relations
- ]
- )
- if self.relations
- else False
- )
-
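As an illustration of using `is_resource_created()` outside an event callback, here is a minimal requirer sketch; the charm class, endpoint name, and database name are assumptions rather than anything from this repository.

```python
# Hypothetical requirer charm gating its status on credentials being available.
from charms.data_platform_libs.v0.data_interfaces import DatabaseRequires
from ops.charm import CharmBase
from ops.model import ActiveStatus, WaitingStatus


class ExampleRequirerCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        # The "database" endpoint and "osm" database name are illustrative.
        self.database = DatabaseRequires(self, relation_name="database", database_name="osm")
        self.framework.observe(self.on.update_status, self._on_update_status)

    def _on_update_status(self, _event) -> None:
        # True only when every active relation has published username and password.
        if not self.database.is_resource_created():
            self.unit.status = WaitingStatus("waiting for database credentials")
            return
        self.unit.status = ActiveStatus()
```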
-
-# General events
-
-
-class ExtraRoleEvent(RelationEvent):
- """Base class for data events."""
-
- @property
- def extra_user_roles(self) -> Optional[str]:
- """Returns the extra user roles that were requested."""
- return self.relation.data[self.relation.app].get("extra-user-roles")
-
-
-class AuthenticationEvent(RelationEvent):
- """Base class for authentication fields for events."""
-
- @property
- def username(self) -> Optional[str]:
- """Returns the created username."""
- return self.relation.data[self.relation.app].get("username")
-
- @property
- def password(self) -> Optional[str]:
- """Returns the password for the created user."""
- return self.relation.data[self.relation.app].get("password")
-
- @property
- def tls(self) -> Optional[str]:
- """Returns whether TLS is configured."""
- return self.relation.data[self.relation.app].get("tls")
-
- @property
- def tls_ca(self) -> Optional[str]:
- """Returns TLS CA."""
- return self.relation.data[self.relation.app].get("tls-ca")
-
-
-# Database related events and fields
-
-
-class DatabaseProvidesEvent(RelationEvent):
- """Base class for database events."""
-
- @property
- def database(self) -> Optional[str]:
- """Returns the database that was requested."""
- return self.relation.data[self.relation.app].get("database")
-
-
-class DatabaseRequestedEvent(DatabaseProvidesEvent, ExtraRoleEvent):
- """Event emitted when a new database is requested for use on this relation."""
-
-
-class DatabaseProvidesEvents(CharmEvents):
- """Database events.
-
- This class defines the events that the database can emit.
- """
-
- database_requested = EventSource(DatabaseRequestedEvent)
-
-
-class DatabaseRequiresEvent(RelationEvent):
- """Base class for database events."""
-
- @property
- def endpoints(self) -> Optional[str]:
- """Returns a comma separated list of read/write endpoints."""
- return self.relation.data[self.relation.app].get("endpoints")
-
- @property
- def read_only_endpoints(self) -> Optional[str]:
- """Returns a comma separated list of read only endpoints."""
- return self.relation.data[self.relation.app].get("read-only-endpoints")
-
- @property
- def replset(self) -> Optional[str]:
- """Returns the replicaset name.
-
- MongoDB only.
- """
- return self.relation.data[self.relation.app].get("replset")
-
- @property
- def uris(self) -> Optional[str]:
- """Returns the connection URIs.
-
- MongoDB, Redis, OpenSearch.
- """
- return self.relation.data[self.relation.app].get("uris")
-
- @property
- def version(self) -> Optional[str]:
- """Returns the version of the database.
-
- Version as informed by the database daemon.
- """
- return self.relation.data[self.relation.app].get("version")
-
-
-class DatabaseCreatedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when a new database is created for use on this relation."""
-
-
-class DatabaseEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when the read/write endpoints are changed."""
-
-
-class DatabaseReadOnlyEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when the read only endpoints are changed."""
-
-
-class DatabaseRequiresEvents(CharmEvents):
- """Database events.
-
- This class defines the events that the database can emit.
- """
-
- database_created = EventSource(DatabaseCreatedEvent)
- endpoints_changed = EventSource(DatabaseEndpointsChangedEvent)
- read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent)
-
-
-# Database Provider and Requires
-
-
-class DatabaseProvides(DataProvides):
- """Provider-side of the database relations."""
-
- on = DatabaseProvidesEvents()
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
-
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation has changed."""
- # Only the leader should handle this event.
- if not self.local_unit.is_leader():
- return
-
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Emit a database requested event if the setup key (database name and optional
- # extra user roles) was added to the relation databag by the application.
- if "database" in diff.added:
- self.on.database_requested.emit(event.relation, app=event.app, unit=event.unit)
-
- def set_endpoints(self, relation_id: int, connection_strings: str) -> None:
- """Set database primary connections.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- connection_strings: database hosts and ports comma separated list.
- """
- self._update_relation_data(relation_id, {"endpoints": connection_strings})
-
- def set_read_only_endpoints(self, relation_id: int, connection_strings: str) -> None:
- """Set database replicas connection strings.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- connection_strings: database hosts and ports comma separated list.
- """
- self._update_relation_data(relation_id, {"read-only-endpoints": connection_strings})
-
- def set_replset(self, relation_id: int, replset: str) -> None:
- """Set replica set name in the application relation databag.
-
- MongoDB only.
-
- Args:
- relation_id: the identifier for a particular relation.
- replset: replica set name.
- """
- self._update_relation_data(relation_id, {"replset": replset})
-
- def set_uris(self, relation_id: int, uris: str) -> None:
- """Set the database connection URIs in the application relation databag.
-
- MongoDB, Redis, and OpenSearch only.
-
- Args:
- relation_id: the identifier for a particular relation.
- uris: connection URIs.
- """
- self._update_relation_data(relation_id, {"uris": uris})
-
- def set_version(self, relation_id: int, version: str) -> None:
- """Set the database version in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- version: database version.
- """
- self._update_relation_data(relation_id, {"version": version})
-
-
-class DatabaseRequires(DataRequires):
- """Requires-side of the database relation."""
-
- on = DatabaseRequiresEvents()
-
- def __init__(
- self,
- charm,
- relation_name: str,
- database_name: str,
- extra_user_roles: str = None,
- relations_aliases: List[str] = None,
- ):
- """Manager of database client relations."""
- super().__init__(charm, relation_name, extra_user_roles)
- self.database = database_name
- self.relations_aliases = relations_aliases
-
- # Define custom event names for each alias.
- if relations_aliases:
- # Ensure the number of aliases does not exceed the maximum
- # of connections allowed in the specific relation.
- relation_connection_limit = self.charm.meta.requires[relation_name].limit
- if len(relations_aliases) != relation_connection_limit:
- raise ValueError(
- f"The number of aliases must match the maximum number of connections allowed in the relation. "
- f"Expected {relation_connection_limit}, got {len(relations_aliases)}"
- )
-
- for relation_alias in relations_aliases:
- self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent)
- self.on.define_event(
- f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent
- )
- self.on.define_event(
- f"{relation_alias}_read_only_endpoints_changed",
- DatabaseReadOnlyEndpointsChangedEvent,
- )
-
- def _assign_relation_alias(self, relation_id: int) -> None:
- """Assigns an alias to a relation.
-
- This function writes in the unit data bag.
-
- Args:
- relation_id: the identifier for a particular relation.
- """
- # If no aliases were provided, return immediately.
- if not self.relations_aliases:
- return
-
- # Return if an alias was already assigned to this relation
- # (like when there are more than one unit joining the relation).
- if (
- self.charm.model.get_relation(self.relation_name, relation_id)
- .data[self.local_unit]
- .get("alias")
- ):
- return
-
- # Retrieve the available aliases (the ones that weren't assigned to any relation).
- available_aliases = self.relations_aliases[:]
- for relation in self.charm.model.relations[self.relation_name]:
- alias = relation.data[self.local_unit].get("alias")
- if alias:
- logger.debug("Alias %s was already assigned to relation %d", alias, relation.id)
- available_aliases.remove(alias)
-
- # Set the alias in the unit relation databag of the specific relation.
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_unit].update({"alias": available_aliases[0]})
-
- def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None:
- """Emit an aliased event to a particular relation if it has an alias.
-
- Args:
- event: the relation changed event that was received.
- event_name: the name of the event to emit.
- """
- alias = self._get_relation_alias(event.relation.id)
- if alias:
- getattr(self.on, f"{alias}_{event_name}").emit(
- event.relation, app=event.app, unit=event.unit
- )
-
- def _get_relation_alias(self, relation_id: int) -> Optional[str]:
- """Returns the relation alias.
-
- Args:
- relation_id: the identifier for a particular relation.
-
- Returns:
- the relation alias or None if the relation was not found.
- """
- for relation in self.charm.model.relations[self.relation_name]:
- if relation.id == relation_id:
- return relation.data[self.local_unit].get("alias")
- return None
-
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the database relation."""
- # If relations aliases were provided, assign one to the relation.
- self._assign_relation_alias(event.relation.id)
-
- # Sets both database and extra user roles in the relation
- # if the roles are provided. Otherwise, sets only the database.
- if self.extra_user_roles:
- self._update_relation_data(
- event.relation.id,
- {
- "database": self.database,
- "extra-user-roles": self.extra_user_roles,
- },
- )
- else:
- self._update_relation_data(event.relation.id, {"database": self.database})
-
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- """Event emitted when the database relation has changed."""
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Check if the database is created
- # (the database charm shared the credentials).
- if "username" in diff.added and "password" in diff.added:
- # Emit the default event (the one without an alias).
- logger.info("database created at %s", datetime.now())
- self.on.database_created.emit(event.relation, app=event.app, unit=event.unit)
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "database_created")
-
- # To avoid unnecessary application restarts do not trigger
- # “endpoints_changed” event if “database_created” is triggered.
- return
-
- # Emit an endpoints changed event if the database
- # added or changed this info in the relation databag.
- if "endpoints" in diff.added or "endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("endpoints changed on %s", datetime.now())
- self.on.endpoints_changed.emit(event.relation, app=event.app, unit=event.unit)
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "endpoints_changed")
-
- # To avoid unnecessary application restarts do not trigger
- # “read_only_endpoints_changed” event if “endpoints_changed” is triggered.
- return
-
- # Emit a read only endpoints changed event if the database
- # added or changed this info in the relation databag.
- if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("read-only-endpoints changed on %s", datetime.now())
- self.on.read_only_endpoints_changed.emit(
- event.relation, app=event.app, unit=event.unit
- )
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "read_only_endpoints_changed")
-
-
-# Kafka related events
-
-
-class KafkaProvidesEvent(RelationEvent):
- """Base class for Kafka events."""
-
- @property
- def topic(self) -> Optional[str]:
- """Returns the topic that was requested."""
- return self.relation.data[self.relation.app].get("topic")
-
-
-class TopicRequestedEvent(KafkaProvidesEvent, ExtraRoleEvent):
- """Event emitted when a new topic is requested for use on this relation."""
-
-
-class KafkaProvidesEvents(CharmEvents):
- """Kafka events.
-
- This class defines the events that Kafka can emit.
- """
-
- topic_requested = EventSource(TopicRequestedEvent)
-
-
-class KafkaRequiresEvent(RelationEvent):
- """Base class for Kafka events."""
-
- @property
- def bootstrap_server(self) -> Optional[str]:
- """Returns a a comma-seperated list of broker uris."""
- return self.relation.data[self.relation.app].get("endpoints")
-
- @property
- def consumer_group_prefix(self) -> Optional[str]:
- """Returns the consumer-group-prefix."""
- return self.relation.data[self.relation.app].get("consumer-group-prefix")
-
- @property
- def zookeeper_uris(self) -> Optional[str]:
- """Returns a comma separated list of Zookeeper uris."""
- return self.relation.data[self.relation.app].get("zookeeper-uris")
-
-
-class TopicCreatedEvent(AuthenticationEvent, KafkaRequiresEvent):
- """Event emitted when a new topic is created for use on this relation."""
-
-
-class BootstrapServerChangedEvent(AuthenticationEvent, KafkaRequiresEvent):
- """Event emitted when the bootstrap server is changed."""
-
-
-class KafkaRequiresEvents(CharmEvents):
- """Kafka events.
-
- This class defines the events that Kafka can emit.
- """
-
- topic_created = EventSource(TopicCreatedEvent)
- bootstrap_server_changed = EventSource(BootstrapServerChangedEvent)
-
-
-# Kafka Provides and Requires
-
-
-class KafkaProvides(DataProvides):
- """Provider-side of the Kafka relation."""
-
- on = KafkaProvidesEvents()
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
-
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation has changed."""
- # Only the leader should handle this event.
- if not self.local_unit.is_leader():
- return
-
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Emit a topic requested event if the setup key (topic name and optional
- # extra user roles) was added to the relation databag by the application.
- if "topic" in diff.added:
- self.on.topic_requested.emit(event.relation, app=event.app, unit=event.unit)
-
- def set_bootstrap_server(self, relation_id: int, bootstrap_server: str) -> None:
- """Set the bootstrap server in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- bootstrap_server: the bootstrap server address.
- """
- self._update_relation_data(relation_id, {"endpoints": bootstrap_server})
-
- def set_consumer_group_prefix(self, relation_id: int, consumer_group_prefix: str) -> None:
- """Set the consumer group prefix in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- consumer_group_prefix: the consumer group prefix string.
- """
- self._update_relation_data(relation_id, {"consumer-group-prefix": consumer_group_prefix})
-
- def set_zookeeper_uris(self, relation_id: int, zookeeper_uris: str) -> None:
- """Set the zookeeper uris in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- zookeeper_uris: comma-separated list of ZooKeeper server URIs.
- """
- self._update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris})
-
-
-class KafkaRequires(DataRequires):
- """Requires-side of the Kafka relation."""
-
- on = KafkaRequiresEvents()
-
- def __init__(self, charm, relation_name: str, topic: str, extra_user_roles: str = None):
- """Manager of Kafka client relations."""
- # super().__init__(charm, relation_name)
- super().__init__(charm, relation_name, extra_user_roles)
- self.charm = charm
- self.topic = topic
-
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the Kafka relation."""
- # Sets both topic and extra user roles in the relation
- # if the roles are provided. Otherwise, sets only the topic.
- self._update_relation_data(
- event.relation.id,
- {
- "topic": self.topic,
- "extra-user-roles": self.extra_user_roles,
- }
- if self.extra_user_roles is not None
- else {"topic": self.topic},
- )
-
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- """Event emitted when the Kafka relation has changed."""
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Check if the topic is created
- # (the Kafka charm shared the credentials).
- if "username" in diff.added and "password" in diff.added:
- # Emit the default event (the one without an alias).
- logger.info("topic created at %s", datetime.now())
- self.on.topic_created.emit(event.relation, app=event.app, unit=event.unit)
-
- # To avoid unnecessary application restarts do not trigger
- # “endpoints_changed” event if “topic_created” is triggered.
- return
-
- # Emit an endpoints (bootstrap-server) changed event if Kafka
- # added or changed this info in the relation databag.
- if "endpoints" in diff.added or "endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("endpoints changed on %s", datetime.now())
- self.on.bootstrap_server_changed.emit(
- event.relation, app=event.app, unit=event.unit
- ) # here check if this is the right design
- return
diff --git a/installers/charm/osm-lcm/lib/charms/kafka_k8s/v0/kafka.py b/installers/charm/osm-lcm/lib/charms/kafka_k8s/v0/kafka.py
deleted file mode 100644
index aeb5edcb..00000000
--- a/installers/charm/osm-lcm/lib/charms/kafka_k8s/v0/kafka.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Kafka library.
-
-This [library](https://juju.is/docs/sdk/libraries) implements both sides of the
-`kafka` [interface](https://juju.is/docs/sdk/relations).
-
-The *provider* side of this interface is implemented by the
-[kafka-k8s Charmed Operator](https://charmhub.io/kafka-k8s).
-
-Any Charmed Operator that *requires* Kafka for providing its
-service should implement the *requirer* side of this interface.
-
-In a nutshell using this library to implement a Charmed Operator *requiring*
-Kafka would look like
-
-```
-$ charmcraft fetch-lib charms.kafka_k8s.v0.kafka
-```
-
-`metadata.yaml`:
-
-```
-requires:
- kafka:
- interface: kafka
- limit: 1
-```
-
-`src/charm.py`:
-
-```
-from charms.kafka_k8s.v0.kafka import KafkaEvents, KafkaRequires
-from ops.charm import CharmBase
-
-
-class MyCharm(CharmBase):
-
- on = KafkaEvents()
-
- def __init__(self, *args):
- super().__init__(*args)
- self.kafka = KafkaRequires(self)
- self.framework.observe(
- self.on.kafka_available,
- self._on_kafka_available,
- )
- self.framework.observe(
- self.on["kafka"].relation_broken,
- self._on_kafka_broken,
- )
-
- def _on_kafka_available(self, event):
- # Get Kafka host and port
- host: str = self.kafka.host
- port: int = self.kafka.port
- # host => "kafka-k8s"
- # port => 9092
-
- def _on_kafka_broken(self, event):
- # Stop service
- # ...
- self.unit.status = BlockedStatus("need kafka relation")
-```
-
-You can file bugs
-[here](https://github.com/charmed-osm/kafka-k8s-operator/issues)!
-"""
-
-from typing import Optional
-
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, Object
-
-# The unique Charmhub library identifier, never change it
-from ops.model import Relation
-
-LIBID = "eacc8c85082347c9aae740e0220b8376"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 4
-
-
-KAFKA_HOST_APP_KEY = "host"
-KAFKA_PORT_APP_KEY = "port"
-
-
-class _KafkaAvailableEvent(EventBase):
- """Event emitted when Kafka is available."""
-
-
-class KafkaEvents(CharmEvents):
- """Kafka events.
-
- This class defines the events that Kafka can emit.
-
- Events:
- kafka_available (_KafkaAvailableEvent)
- """
-
- kafka_available = EventSource(_KafkaAvailableEvent)
-
-
-class KafkaRequires(Object):
- """Requires-side of the Kafka relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "kafka") -> None:
- super().__init__(charm, endpoint_name)
- self.charm = charm
- self._endpoint_name = endpoint_name
-
- # Observe relation events
- event_observe_mapping = {
- charm.on[self._endpoint_name].relation_changed: self._on_relation_changed,
- }
- for event, observer in event_observe_mapping.items():
- self.framework.observe(event, observer)
-
- def _on_relation_changed(self, event) -> None:
- if event.relation.app and all(
- key in event.relation.data[event.relation.app]
- for key in (KAFKA_HOST_APP_KEY, KAFKA_PORT_APP_KEY)
- ):
- self.charm.on.kafka_available.emit()
-
- @property
- def host(self) -> str:
- """Get kafka hostname."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- relation.data[relation.app].get(KAFKA_HOST_APP_KEY)
- if relation and relation.app
- else None
- )
-
- @property
- def port(self) -> int:
- """Get kafka port number."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- int(relation.data[relation.app].get(KAFKA_PORT_APP_KEY))
- if relation and relation.app
- else None
- )
-
-
-class KafkaProvides(Object):
- """Provides-side of the Kafka relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "kafka") -> None:
- super().__init__(charm, endpoint_name)
- self._endpoint_name = endpoint_name
-
- def set_host_info(self, host: str, port: int, relation: Optional[Relation] = None) -> None:
- """Set Kafka host and port.
-
- This function writes in the application data of the relation, therefore,
- only the unit leader can call it.
-
- Args:
- host (str): Kafka hostname or IP address.
- port (int): Kafka port.
- relation (Optional[Relation]): Relation to update.
- If not specified, all relations will be updated.
-
- Raises:
- Exception: if a non-leader unit calls this function.
- """
- if not self.model.unit.is_leader():
- raise Exception("only the leader set host information.")
-
- if relation:
- self._update_relation_data(host, port, relation)
- return
-
- for relation in self.model.relations[self._endpoint_name]:
- self._update_relation_data(host, port, relation)
-
- def _update_relation_data(self, host: str, port: int, relation: Relation) -> None:
- """Update data in relation if needed."""
- relation.data[self.model.app][KAFKA_HOST_APP_KEY] = host
- relation.data[self.model.app][KAFKA_PORT_APP_KEY] = str(port)
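The docstring above only shows the requirer side of the `kafka` interface. A provider charm would publish its address through `KafkaProvides.set_host_info`; here is a minimal sketch, assuming the provider exposes Kafka at `kafka-k8s:9092` (the values shown in the requirer example):

```python
# Illustrative sketch only: assumes lib/charms/kafka_k8s/v0/kafka.py (above) is
# available in the provider charm.
from charms.kafka_k8s.v0.kafka import KafkaProvides
from ops.charm import CharmBase, RelationJoinedEvent
from ops.main import main


class ExampleKafkaProviderCharm(CharmBase):  # hypothetical charm
    def __init__(self, *args):
        super().__init__(*args)
        self.kafka = KafkaProvides(self)
        self.framework.observe(
            self.on["kafka"].relation_joined, self._on_kafka_relation_joined
        )

    def _on_kafka_relation_joined(self, event: RelationJoinedEvent) -> None:
        # set_host_info writes application relation data and raises if a
        # non-leader unit calls it, so guard on leadership first.
        if self.unit.is_leader():
            self.kafka.set_host_info("kafka-k8s", 9092, event.relation)


if __name__ == "__main__":
    main(ExampleKafkaProviderCharm)
```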
diff --git a/installers/charm/osm-lcm/lib/charms/osm_libs/v0/utils.py b/installers/charm/osm-lcm/lib/charms/osm_libs/v0/utils.py
deleted file mode 100644
index d739ba68..00000000
--- a/installers/charm/osm-lcm/lib/charms/osm_libs/v0/utils.py
+++ /dev/null
@@ -1,544 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-"""OSM Utils Library.
-
-This library offers some utilities made for but not limited to Charmed OSM.
-
-# Getting started
-
-Execute the following command inside your Charmed Operator folder to fetch the library.
-
-```shell
-charmcraft fetch-lib charms.osm_libs.v0.utils
-```
-
-# CharmError Exception
-
-An exception that takes two arguments, the message and the StatusBase class, which are useful
-for setting the status of the charm when the exception is raised.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import CharmError
-
-class MyCharm(CharmBase):
- def _on_config_changed(self, _):
- try:
- if not self.config.get("some-option"):
- raise CharmError("need some-option", BlockedStatus)
-
- if not self.mysql_ready:
- raise CharmError("waiting for mysql", WaitingStatus)
-
- # Do stuff...
-
- except CharmError as e:
- self.unit.status = e.status
-```
-
-# Pebble validations
-
-The `check_container_ready` function checks that a container is ready,
-and therefore Pebble is ready.
-
-The `check_service_active` function checks that a service in a container is running.
-
-Both functions raise a CharmError if the validations fail.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import check_container_ready, check_service_active
-
-class MyCharm(CharmBase):
- def _on_config_changed(self, _):
- try:
- container: Container = self.unit.get_container("my-container")
- check_container_ready(container)
- check_service_active(container, "my-service")
- # Do stuff...
-
- except CharmError as e:
- self.unit.status = e.status
-```
-
-# Debug-mode
-
-The debug-mode allows OSM developers to easily debug OSM modules.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import DebugMode
-
-class MyCharm(CharmBase):
- _stored = StoredState()
-
- def __init__(self, _):
- # ...
- container: Container = self.unit.get_container("my-container")
- hostpaths = [
- HostPath(
- config="module-hostpath",
- container_path="/usr/lib/python3/dist-packages/module"
- ),
- ]
- vscode_workspace_path = "files/vscode-workspace.json"
- self.debug_mode = DebugMode(
- self,
- self._stored,
- container,
- hostpaths,
- vscode_workspace_path,
- )
-
- def _on_update_status(self, _):
- if self.debug_mode.started:
- return
- # ...
-
- def _get_debug_mode_information(self):
- command = self.debug_mode.command
- password = self.debug_mode.password
- return command, password
-```
-
-# More
-
-- Get pod IP with `get_pod_ip()`
-"""
-from dataclasses import dataclass
-import logging
-import secrets
-import socket
-from pathlib import Path
-from typing import List
-
-from lightkube import Client
-from lightkube.models.core_v1 import HostPathVolumeSource, Volume, VolumeMount
-from lightkube.resources.apps_v1 import StatefulSet
-from ops.charm import CharmBase
-from ops.framework import Object, StoredState
-from ops.model import (
- ActiveStatus,
- BlockedStatus,
- Container,
- MaintenanceStatus,
- StatusBase,
- WaitingStatus,
-)
-from ops.pebble import ServiceStatus
-
-# The unique Charmhub library identifier, never change it
-LIBID = "e915908eebee4cdd972d484728adf984"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 5
-
-logger = logging.getLogger(__name__)
-
-
-class CharmError(Exception):
- """Charm Error Exception."""
-
- def __init__(self, message: str, status_class: StatusBase = BlockedStatus) -> None:
- self.message = message
- self.status_class = status_class
- self.status = status_class(message)
-
-
-def check_container_ready(container: Container) -> None:
- """Check Pebble has started in the container.
-
- Args:
- container (Container): Container to be checked.
-
- Raises:
- CharmError: if container is not ready.
- """
- if not container.can_connect():
- raise CharmError("waiting for pebble to start", MaintenanceStatus)
-
-
-def check_service_active(container: Container, service_name: str) -> None:
- """Check if the service is running.
-
- Args:
- container (Container): Container to be checked.
- service_name (str): Name of the service to check.
-
- Raises:
- CharmError: if the service is not running.
- """
- if service_name not in container.get_plan().services:
- raise CharmError(f"{service_name} service not configured yet", WaitingStatus)
-
- if container.get_service(service_name).current != ServiceStatus.ACTIVE:
- raise CharmError(f"{service_name} service is not running")
-
-
-def get_pod_ip() -> str:
- """Get Kubernetes Pod IP.
-
- Returns:
- str: The IP of the Pod.
- """
- return socket.gethostbyname(socket.gethostname())
-
-
-_DEBUG_SCRIPT = r"""#!/bin/bash
-# Install SSH
-
-function download_code(){{
- wget https://go.microsoft.com/fwlink/?LinkID=760868 -O code.deb
-}}
-
-function setup_envs(){{
- grep "source /debug.envs" /root/.bashrc || echo "source /debug.envs" | tee -a /root/.bashrc
-}}
-function setup_ssh(){{
- apt install ssh -y
- cat /etc/ssh/sshd_config |
- grep -E '^PermitRootLogin yes$$' || (
- echo PermitRootLogin yes |
- tee -a /etc/ssh/sshd_config
- )
- service ssh stop
- sleep 3
- service ssh start
- usermod --password $(echo {} | openssl passwd -1 -stdin) root
-}}
-
-function setup_code(){{
- apt install libasound2 -y
- (dpkg -i code.deb || apt-get install -f -y || apt-get install -f -y) && echo Code installed successfully
- code --install-extension ms-python.python --user-data-dir /root
- mkdir -p /root/.vscode-server
- cp -R /root/.vscode/extensions /root/.vscode-server/extensions
-}}
-
-export DEBIAN_FRONTEND=noninteractive
-apt update && apt install wget -y
-download_code &
-setup_ssh &
-setup_envs
-wait
-setup_code &
-wait
-"""
-
-
-@dataclass
-class SubModule:
- """Represent RO Submodules."""
- sub_module_path: str
- container_path: str
-
-
-class HostPath:
- """Represents a hostpath."""
- def __init__(self, config: str, container_path: str, submodules: dict = None) -> None:
- mount_path_items = config.split("-")
- mount_path_items.reverse()
- self.mount_path = "/" + "/".join(mount_path_items)
- self.config = config
- self.sub_module_dict = {}
- if submodules:
- for submodule in submodules.keys():
- self.sub_module_dict[submodule] = SubModule(
- sub_module_path=self.mount_path + "/" + submodule + "/" + submodules[submodule].split("/")[-1],
- container_path=submodules[submodule],
- )
- else:
- self.container_path = container_path
- self.module_name = container_path.split("/")[-1]
-
-class DebugMode(Object):
- """Class to handle the debug-mode."""
-
- def __init__(
- self,
- charm: CharmBase,
- stored: StoredState,
- container: Container,
- hostpaths: List[HostPath] = [],
- vscode_workspace_path: str = "files/vscode-workspace.json",
- ) -> None:
- super().__init__(charm, "debug-mode")
-
- self.charm = charm
- self._stored = stored
- self.hostpaths = hostpaths
- self.vscode_workspace = Path(vscode_workspace_path).read_text()
- self.container = container
-
- self._stored.set_default(
- debug_mode_started=False,
- debug_mode_vscode_command=None,
- debug_mode_password=None,
- )
-
- self.framework.observe(self.charm.on.config_changed, self._on_config_changed)
- self.framework.observe(self.charm.on[container.name].pebble_ready, self._on_config_changed)
- self.framework.observe(self.charm.on.update_status, self._on_update_status)
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- if not self.charm.unit.is_leader():
- return
-
- debug_mode_enabled = self.charm.config.get("debug-mode", False)
- action = self.enable if debug_mode_enabled else self.disable
- action()
-
- def _on_update_status(self, _) -> None:
- """Handler for the update-status event."""
- if not self.charm.unit.is_leader() or not self.started:
- return
-
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- @property
- def started(self) -> bool:
- """Indicates whether the debug-mode has started or not."""
- return self._stored.debug_mode_started
-
- @property
- def command(self) -> str:
- """Command to launch vscode."""
- return self._stored.debug_mode_vscode_command
-
- @property
- def password(self) -> str:
- """SSH password."""
- return self._stored.debug_mode_password
-
- def enable(self, service_name: str = None) -> None:
- """Enable debug-mode.
-
- This function mounts hostpaths of the OSM modules (if set), and
- configures the container so it can be easily debugged. The setup
- includes the configuration of SSH, environment variables, and
- VSCode workspace and plugins.
-
- Args:
- service_name (str, optional): Pebble service name which has the desired environment
- variables. Mandatory if there is more than one Pebble service configured.
- """
- hostpaths_to_reconfigure = self._hostpaths_to_reconfigure()
- if self.started and not hostpaths_to_reconfigure:
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
- return
-
- logger.debug("enabling debug-mode")
-
- # Mount hostpaths if set.
- # If hostpaths are mounted, the statefulset will be restarted,
- # and for that reason we return immediately. On restart, the hostpaths
- # won't be mounted and then we can continue and setup the debug-mode.
- if hostpaths_to_reconfigure:
- self.charm.unit.status = MaintenanceStatus("debug-mode: configuring hostpaths")
- self._configure_hostpaths(hostpaths_to_reconfigure)
- return
-
- self.charm.unit.status = MaintenanceStatus("debug-mode: starting")
- password = secrets.token_hex(8)
- self._setup_debug_mode(
- password,
- service_name,
- mounted_hostpaths=[hp for hp in self.hostpaths if self.charm.config.get(hp.config)],
- )
-
- self._stored.debug_mode_vscode_command = self._get_vscode_command(get_pod_ip())
- self._stored.debug_mode_password = password
- self._stored.debug_mode_started = True
- logger.info("debug-mode is ready")
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- def disable(self) -> None:
- """Disable debug-mode."""
- logger.debug("disabling debug-mode")
- current_status = self.charm.unit.status
- hostpaths_unmounted = self._unmount_hostpaths()
-
- if not self._stored.debug_mode_started:
- return
- self._stored.debug_mode_started = False
- self._stored.debug_mode_vscode_command = None
- self._stored.debug_mode_password = None
-
- if not hostpaths_unmounted:
- self.charm.unit.status = current_status
- self._restart()
-
- def _hostpaths_to_reconfigure(self) -> List[HostPath]:
- hostpaths_to_reconfigure: List[HostPath] = []
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
- volumes = statefulset.spec.template.spec.volumes
-
- for hostpath in self.hostpaths:
- hostpath_is_set = True if self.charm.config.get(hostpath.config) else False
- hostpath_already_configured = next(
- (True for volume in volumes if volume.name == hostpath.config), False
- )
- if hostpath_is_set != hostpath_already_configured:
- hostpaths_to_reconfigure.append(hostpath)
-
- return hostpaths_to_reconfigure
-
- def _setup_debug_mode(
- self,
- password: str,
- service_name: str = None,
- mounted_hostpaths: List[HostPath] = [],
- ) -> None:
- services = self.container.get_plan().services
- if not service_name and len(services) != 1:
- raise Exception("Cannot start debug-mode: please set the service_name")
-
- service = None
- if not service_name:
- service_name, service = services.popitem()
- if not service:
- service = services.get(service_name)
-
- logger.debug(f"getting environment variables from service {service_name}")
- environment = service.environment
- environment_file_content = "\n".join(
- [f'export {key}="{value}"' for key, value in environment.items()]
- )
- logger.debug(f"pushing environment file to {self.container.name} container")
- self.container.push("/debug.envs", environment_file_content)
-
- # Push VSCode workspace
- logger.debug(f"pushing vscode workspace to {self.container.name} container")
- self.container.push("/debug.code-workspace", self.vscode_workspace)
-
- # Execute debugging script
- logger.debug(f"pushing debug-mode setup script to {self.container.name} container")
- self.container.push("/debug.sh", _DEBUG_SCRIPT.format(password), permissions=0o777)
- logger.debug(f"executing debug-mode setup script in {self.container.name} container")
- self.container.exec(["/debug.sh"]).wait_output()
- logger.debug(f"stopping service {service_name} in {self.container.name} container")
- self.container.stop(service_name)
-
- # Add symlinks to mounted hostpaths
- for hostpath in mounted_hostpaths:
- logger.debug(f"adding symlink for {hostpath.config}")
- if len(hostpath.sub_module_dict) > 0:
- for sub_module in hostpath.sub_module_dict.keys():
- self.container.exec(["rm", "-rf", hostpath.sub_module_dict[sub_module].container_path]).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- hostpath.sub_module_dict[sub_module].sub_module_path,
- hostpath.sub_module_dict[sub_module].container_path,
- ]
- )
-
- else:
- self.container.exec(["rm", "-rf", hostpath.container_path]).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- f"{hostpath.mount_path}/{hostpath.module_name}",
- hostpath.container_path,
- ]
- )
-
- def _configure_hostpaths(self, hostpaths: List[HostPath]):
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in hostpaths:
- if self.charm.config.get(hostpath.config):
- self._add_hostpath_to_statefulset(hostpath, statefulset)
- else:
- self._delete_hostpath_from_statefulset(hostpath, statefulset)
-
- client.replace(statefulset)
-
- def _unmount_hostpaths(self) -> bool:
- client = Client()
- hostpath_unmounted = False
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in self.hostpaths:
- if self._delete_hostpath_from_statefulset(hostpath, statefulset):
- hostpath_unmounted = True
-
- if hostpath_unmounted:
- client.replace(statefulset)
-
- return hostpath_unmounted
-
- def _add_hostpath_to_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- # Add volume
- logger.debug(f"adding volume {hostpath.config} to {self.charm.app.name} statefulset")
- volume = Volume(
- hostpath.config,
- hostPath=HostPathVolumeSource(
- path=self.charm.config[hostpath.config],
- type="Directory",
- ),
- )
- statefulset.spec.template.spec.volumes.append(volume)
-
- # Add volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
-
- logger.debug(
- f"adding volumeMount {hostpath.config} to {self.container.name} container"
- )
- statefulset_container.volumeMounts.append(
- VolumeMount(mountPath=hostpath.mount_path, name=hostpath.config)
- )
-
- def _delete_hostpath_from_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- hostpath_unmounted = False
- for volume in statefulset.spec.template.spec.volumes:
-
- if hostpath.config != volume.name:
- continue
-
- # Remove volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
- for volume_mount in statefulset_container.volumeMounts:
- if volume_mount.name != hostpath.config:
- continue
-
- logger.debug(
- f"removing volumeMount {hostpath.config} from {self.container.name} container"
- )
- statefulset_container.volumeMounts.remove(volume_mount)
-
- # Remove volume
- logger.debug(
- f"removing volume {hostpath.config} from {self.charm.app.name} statefulset"
- )
- statefulset.spec.template.spec.volumes.remove(volume)
-
- hostpath_unmounted = True
- return hostpath_unmounted
-
- def _get_vscode_command(
- self,
- pod_ip: str,
- user: str = "root",
- workspace_path: str = "/debug.code-workspace",
- ) -> str:
- return f"code --remote ssh-remote+{user}@{pod_ip} {workspace_path}"
-
- def _restart(self):
- self.container.exec(["kill", "-HUP", "1"])
diff --git a/installers/charm/osm-lcm/lib/charms/osm_ro/v0/ro.py b/installers/charm/osm-lcm/lib/charms/osm_ro/v0/ro.py
deleted file mode 100644
index 79bee5e7..00000000
--- a/installers/charm/osm-lcm/lib/charms/osm_ro/v0/ro.py
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# Learn more at: https://juju.is/docs/sdk
-
-"""Ro library.
-
-This [library](https://juju.is/docs/sdk/libraries) implements both sides of the
-`ro` [interface](https://juju.is/docs/sdk/relations).
-
-The *provider* side of this interface is implemented by the
-[osm-ro Charmed Operator](https://charmhub.io/osm-ro).
-
-Any Charmed Operator that *requires* RO for providing its
-service should implement the *requirer* side of this interface.
-
-In a nutshell using this library to implement a Charmed Operator *requiring*
-RO would look like
-
-```
-$ charmcraft fetch-lib charms.osm_ro.v0.ro
-```
-
-`metadata.yaml`:
-
-```
-requires:
- ro:
- interface: ro
- limit: 1
-```
-
-`src/charm.py`:
-
-```
-from charms.osm_ro.v0.ro import RoRequires
-from ops.charm import CharmBase
-
-
-class MyCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- self.ro = RoRequires(self)
- self.framework.observe(
- self.on["ro"].relation_changed,
- self._on_ro_relation_changed,
- )
- self.framework.observe(
- self.on["ro"].relation_broken,
- self._on_ro_broken,
- )
-
- def _on_ro_relation_changed(self, event):
- # Get RO host and port
- host: str = self.ro.host
- port: int = self.ro.port
- # host => "osm-ro"
- # port => 9999
-
- def _on_ro_broken(self, event):
- # Stop service
- # ...
- self.unit.status = BlockedStatus("need ro relation")
-```
-
-You can file bugs
-[here](https://osm.etsi.org/bugzilla/enter_bug.cgi), selecting the `devops` module!
-"""
-from typing import Optional
-
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, Object
-from ops.model import Relation
-
-
-# The unique Charmhub library identifier, never change it
-LIBID = "a34c3331a43f4f6db2b1499ff4d1390d"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 1
-
-RO_HOST_APP_KEY = "host"
-RO_PORT_APP_KEY = "port"
-
-
-class RoRequires(Object): # pragma: no cover
- """Requires-side of the Ro relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "ro") -> None:
- super().__init__(charm, endpoint_name)
- self.charm = charm
- self._endpoint_name = endpoint_name
-
- @property
- def host(self) -> str:
- """Get ro hostname."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- relation.data[relation.app].get(RO_HOST_APP_KEY)
- if relation and relation.app
- else None
- )
-
- @property
- def port(self) -> int:
- """Get ro port number."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- int(relation.data[relation.app].get(RO_PORT_APP_KEY))
- if relation and relation.app
- else None
- )
-
-
-class RoProvides(Object):
- """Provides-side of the Ro relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "ro") -> None:
- super().__init__(charm, endpoint_name)
- self._endpoint_name = endpoint_name
-
- def set_host_info(self, host: str, port: int, relation: Optional[Relation] = None) -> None:
- """Set Ro host and port.
-
- This function writes in the application data of the relation, therefore,
- only the unit leader can call it.
-
- Args:
- host (str): Ro hostname or IP address.
- port (int): Ro port.
- relation (Optional[Relation]): Relation to update.
- If not specified, all relations will be updated.
-
- Raises:
- Exception: if a non-leader unit calls this function.
- """
- if not self.model.unit.is_leader():
- raise Exception("only the leader set host information.")
-
- if relation:
- self._update_relation_data(host, port, relation)
- return
-
- for relation in self.model.relations[self._endpoint_name]:
- self._update_relation_data(host, port, relation)
-
- def _update_relation_data(self, host: str, port: int, relation: Relation) -> None:
- """Update data in relation if needed."""
- relation.data[self.model.app][RO_HOST_APP_KEY] = host
- relation.data[self.model.app][RO_PORT_APP_KEY] = str(port)
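As with the Kafka library, the docstring above only documents the requirer side of the `ro` interface. A provider charm (such as osm-ro) would publish its address with `RoProvides.set_host_info`; a hedged sketch follows, with the port taken from the docstring example above:

```python
# Illustrative sketch only: assumes lib/charms/osm_ro/v0/ro.py (above) is
# available in the provider charm.
from charms.osm_ro.v0.ro import RoProvides
from ops.charm import CharmBase, RelationJoinedEvent
from ops.main import main


class ExampleRoProviderCharm(CharmBase):  # hypothetical charm
    def __init__(self, *args):
        super().__init__(*args)
        self.ro = RoProvides(self)
        self.framework.observe(self.on["ro"].relation_joined, self._on_ro_relation_joined)

    def _on_ro_relation_joined(self, event: RelationJoinedEvent) -> None:
        # Only the leader may write application relation data.
        if self.unit.is_leader():
            self.ro.set_host_info(self.app.name, 9999, event.relation)


if __name__ == "__main__":
    main(ExampleRoProviderCharm)
```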
diff --git a/installers/charm/osm-lcm/lib/charms/osm_vca_integrator/v0/vca.py b/installers/charm/osm-lcm/lib/charms/osm_vca_integrator/v0/vca.py
deleted file mode 100644
index 21dac69c..00000000
--- a/installers/charm/osm-lcm/lib/charms/osm_vca_integrator/v0/vca.py
+++ /dev/null
@@ -1,221 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""VCA Library.
-
-VCA stands for VNF Configuration and Abstraction, and is one of the core components
-of OSM. The Juju Controller is in charged of this role.
-
-This [library](https://juju.is/docs/sdk/libraries) implements both sides of the
-`vca` [interface](https://juju.is/docs/sdk/relations).
-
-The *provider* side of this interface is implemented by the
-[osm-vca-integrator Charmed Operator](https://charmhub.io/osm-vca-integrator).
-
-This library helps to integrate with the vca-integrator charm, which provides the
-data that OSM components need in order to talk to the VCA.
-
-Any Charmed OSM component that *requires* to talk to the VCA should implement
-the *requirer* side of this interface.
-
-In a nutshell using this library to implement a Charmed Operator *requiring* VCA data
-would look like
-
-```
-$ charmcraft fetch-lib charms.osm_vca_integrator.v0.vca
-```
-
-`metadata.yaml`:
-
-```
-requires:
- vca:
- interface: osm-vca
-```
-
-`src/charm.py`:
-
-```
-from charms.osm_vca_integrator.v0.vca import VcaData, VcaIntegratorEvents, VcaRequires
-from ops.charm import CharmBase
-
-
-class MyCharm(CharmBase):
-
- on = VcaIntegratorEvents()
-
- def __init__(self, *args):
- super().__init__(*args)
- self.vca = VcaRequires(self)
- self.framework.observe(
- self.on.vca_data_changed,
- self._on_vca_data_changed,
- )
-
- def _on_vca_data_changed(self, event):
- # Get Vca data
- data: VcaData = self.vca.data
- # data.endpoints => "localhost:17070"
-```
-
-You can file bugs
-[here](https://github.com/charmed-osm/osm-vca-integrator-operator/issues)!
-"""
-
-import json
-import logging
-from typing import Any, Dict, Optional
-
-from ops.charm import CharmBase, CharmEvents, RelationChangedEvent
-from ops.framework import EventBase, EventSource, Object
-
-# The unique Charmhub library identifier, never change it
-from ops.model import Relation
-
-# The unique Charmhub library identifier, never change it
-LIBID = "746b36c382984e5c8660b78192d84ef9"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 3
-
-
-logger = logging.getLogger(__name__)
-
-
-class VcaDataChangedEvent(EventBase):
- """Event emitted whenever there is a change in the vca data."""
-
- def __init__(self, handle):
- super().__init__(handle)
-
-
-class VcaIntegratorEvents(CharmEvents):
- """VCA Integrator events.
-
- This class defines the events that the VCA Integrator can emit.
-
- Events:
- vca_data_changed (_VcaDataChanged)
- """
-
- vca_data_changed = EventSource(VcaDataChangedEvent)
-
-
-RELATION_MANDATORY_KEYS = ("endpoints", "user", "secret", "public-key", "cacert", "model-configs")
-
-
-class VcaData:
- """Vca data class."""
-
- def __init__(self, data: Dict[str, Any]) -> None:
- self.data: Dict[str, Any] = data
- self.endpoints: str = data["endpoints"]
- self.user: str = data["user"]
- self.secret: str = data["secret"]
- self.public_key: str = data["public-key"]
- self.cacert: str = data["cacert"]
- self.lxd_cloud: str = data.get("lxd-cloud")
- self.lxd_credentials: str = data.get("lxd-credentials")
- self.k8s_cloud: str = data.get("k8s-cloud")
- self.k8s_credentials: str = data.get("k8s-credentials")
- self.model_configs: Dict[str, Any] = data.get("model-configs", {})
-
-
-class VcaDataMissingError(Exception):
- """Data missing exception."""
-
-
-class VcaRequires(Object):
- """Requires part of the vca relation.
-
- Attributes:
- endpoint_name: Endpoint name of the charm for the vca relation.
- data: Vca data from the relation.
- """
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "vca") -> None:
- super().__init__(charm, endpoint_name)
- self._charm = charm
- self.endpoint_name = endpoint_name
- self.framework.observe(charm.on[endpoint_name].relation_changed, self._on_relation_changed)
-
- @property
- def data(self) -> Optional[VcaData]:
- """Vca data from the relation."""
- relation: Relation = self.model.get_relation(self.endpoint_name)
- if not relation or relation.app not in relation.data:
- logger.debug("no application data in the event")
- return
-
- relation_data: Dict = dict(relation.data[relation.app])
- relation_data["model-configs"] = json.loads(relation_data.get("model-configs", "{}"))
- try:
- self._validate_relation_data(relation_data)
- return VcaData(relation_data)
- except VcaDataMissingError as e:
- logger.warning(e)
-
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- if event.app not in event.relation.data:
- logger.debug("no application data in the event")
- return
-
- relation_data = event.relation.data[event.app]
- try:
- self._validate_relation_data(relation_data)
- self._charm.on.vca_data_changed.emit()
- except VcaDataMissingError as e:
- logger.warning(e)
-
- def _validate_relation_data(self, relation_data: Dict[str, str]) -> None:
- if not all(required_key in relation_data for required_key in RELATION_MANDATORY_KEYS):
- raise VcaDataMissingError("vca data not ready yet")
-
- clouds = ("lxd-cloud", "k8s-cloud")
- if not any(cloud in relation_data for cloud in clouds):
- raise VcaDataMissingError("no clouds defined yet")
-
-
-class VcaProvides(Object):
- """Provides part of the vca relation.
-
- Attributes:
- endpoint_name: Endpoint name of the charm for the vca relation.
- """
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "vca") -> None:
- super().__init__(charm, endpoint_name)
- self.endpoint_name = endpoint_name
-
- def update_vca_data(self, vca_data: VcaData) -> None:
- """Update vca data in relation.
-
- Args:
- vca_data: VcaData object.
- """
- relation: Relation
- for relation in self.model.relations[self.endpoint_name]:
- if not relation or self.model.app not in relation.data:
- logger.debug("relation app data not ready yet")
- continue
- for key, value in vca_data.data.items():
- if key == "model-configs":
- value = json.dumps(value)
- relation.data[self.model.app][key] = value
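Finally, the provider side of the `vca` interface publishes its data through `VcaProvides.update_vca_data`. Below is a sketch with placeholder values; every field value here is an assumption, and a real osm-vca-integrator charm would derive them from its actual Juju controller configuration.

```python
# Illustrative sketch only: placeholder VCA data; a real charm would build this
# from its actual Juju controller connection details.
from charms.osm_vca_integrator.v0.vca import VcaData, VcaProvides
from ops.charm import CharmBase
from ops.main import main


class ExampleVcaIntegratorCharm(CharmBase):  # hypothetical charm
    def __init__(self, *args):
        super().__init__(*args)
        self.vca = VcaProvides(self)
        self.framework.observe(self.on.config_changed, self._on_config_changed)

    def _on_config_changed(self, _) -> None:
        if not self.unit.is_leader():
            return
        self.vca.update_vca_data(
            VcaData(
                {
                    "endpoints": "10.0.0.1:17070",
                    "user": "admin",
                    "secret": "placeholder-secret",
                    "public-key": "ssh-rsa placeholder",
                    "cacert": "placeholder-cacert",
                    "lxd-cloud": "lxd-cloud",
                    "lxd-credentials": "lxd-credentials",
                    "model-configs": {},
                }
            )
        )


if __name__ == "__main__":
    main(ExampleVcaIntegratorCharm)
```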
diff --git a/installers/charm/osm-lcm/metadata.yaml b/installers/charm/osm-lcm/metadata.yaml
deleted file mode 100644
index b7dfa3d4..00000000
--- a/installers/charm/osm-lcm/metadata.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Overview on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-name: osm-lcm
-
-# The following metadata are human-readable and will be published prominently on Charmhub.
-
-display-name: OSM LCM
-
-summary: OSM Lifecycle Management (LCM)
-
-description: |
- A Kubernetes operator that deploys the OSM's Lifecycle Management (LCM).
-
- osm-lcm is the Lightweight Build Life Cycle Management for OSM.
- It interacts with the RO module for resource orchestration and with N2VC for VNF configuration.
-
- This charm doesn't make sense on its own.
- See more:
- - https://charmhub.io/osm
-
-containers:
- lcm:
- resource: lcm-image
-
-# This file populates the Resources tab on Charmhub.
-
-resources:
- lcm-image:
- type: oci-image
- description: OCI image for lcm
- upstream-source: opensourcemano/lcm
-
-requires:
- kafka:
- interface: kafka
- limit: 1
- mongodb:
- interface: mongodb_client
- limit: 1
- ro:
- interface: ro
- limit: 1
- vca:
- interface: osm-vca
diff --git a/installers/charm/osm-lcm/pyproject.toml b/installers/charm/osm-lcm/pyproject.toml
deleted file mode 100644
index 16cf0f4b..00000000
--- a/installers/charm/osm-lcm/pyproject.toml
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-# Testing tools configuration
-[tool.coverage.run]
-branch = true
-
-[tool.coverage.report]
-show_missing = true
-
-[tool.pytest.ini_options]
-minversion = "6.0"
-log_cli_level = "INFO"
-
-# Formatting tools configuration
-[tool.black]
-line-length = 99
-target-version = ["py38"]
-
-[tool.isort]
-profile = "black"
-
-# Linting tools configuration
-[tool.flake8]
-max-line-length = 99
-max-doc-length = 99
-max-complexity = 10
-exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
-select = ["E", "W", "F", "C", "N", "R", "D", "H"]
-# Ignore W503, E501 because using black creates errors with this
-# Ignore D107 Missing docstring in __init__
-ignore = ["W503", "E501", "D107"]
-# D100, D101, D102, D103: Ignore missing docstrings in tests
-per-file-ignores = ["tests/*:D100,D101,D102,D103,D104"]
-docstring-convention = "google"
diff --git a/installers/charm/osm-lcm/requirements.txt b/installers/charm/osm-lcm/requirements.txt
deleted file mode 100644
index 398d4ad3..00000000
--- a/installers/charm/osm-lcm/requirements.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-ops < 2.2
-lightkube
-lightkube-models
-# git+https://github.com/charmed-osm/config-validator/
diff --git a/installers/charm/osm-lcm/src/charm.py b/installers/charm/osm-lcm/src/charm.py
deleted file mode 100755
index 2ea90860..00000000
--- a/installers/charm/osm-lcm/src/charm.py
+++ /dev/null
@@ -1,290 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# Learn more at: https://juju.is/docs/sdk
-
-"""OSM LCM charm.
-
-See more: https://charmhub.io/osm
-"""
-
-import logging
-from typing import Any, Dict
-
-from charms.data_platform_libs.v0.data_interfaces import DatabaseRequires
-from charms.kafka_k8s.v0.kafka import KafkaRequires, _KafkaAvailableEvent
-from charms.osm_libs.v0.utils import (
- CharmError,
- DebugMode,
- HostPath,
- check_container_ready,
- check_service_active,
-)
-from charms.osm_ro.v0.ro import RoRequires
-from charms.osm_vca_integrator.v0.vca import VcaDataChangedEvent, VcaRequires
-from ops.charm import ActionEvent, CharmBase, CharmEvents
-from ops.framework import EventSource, StoredState
-from ops.main import main
-from ops.model import ActiveStatus, Container
-
-HOSTPATHS = [
- HostPath(
- config="lcm-hostpath",
- container_path="/usr/lib/python3/dist-packages/osm_lcm",
- ),
- HostPath(
- config="common-hostpath",
- container_path="/usr/lib/python3/dist-packages/osm_common",
- ),
- HostPath(
- config="n2vc-hostpath",
- container_path="/usr/lib/python3/dist-packages/n2vc",
- ),
-]
-
-logger = logging.getLogger(__name__)
-
-
-class LcmEvents(CharmEvents):
- """LCM events."""
-
- vca_data_changed = EventSource(VcaDataChangedEvent)
- kafka_available = EventSource(_KafkaAvailableEvent)
-
-
-class OsmLcmCharm(CharmBase):
- """OSM LCM Kubernetes sidecar charm."""
-
- container_name = "lcm"
- service_name = "lcm"
- on = LcmEvents()
- _stored = StoredState()
-
- def __init__(self, *args):
- super().__init__(*args)
- self.vca = VcaRequires(self)
- self.kafka = KafkaRequires(self)
- self.mongodb_client = DatabaseRequires(
- self, "mongodb", database_name="osm", extra_user_roles="admin"
- )
- self._observe_charm_events()
- self.ro = RoRequires(self)
- self.container: Container = self.unit.get_container(self.container_name)
- self.debug_mode = DebugMode(self, self._stored, self.container, HOSTPATHS)
-
- # ---------------------------------------------------------------------------
- # Handlers for Charm Events
- # ---------------------------------------------------------------------------
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- try:
- self._validate_config()
- self._check_relations()
- # Check if the container is ready.
- # Eventually it will become ready after the first pebble-ready event.
- check_container_ready(self.container)
- if not self.debug_mode.started:
- self._configure_service(self.container)
-
- # Update charm status
- self._on_update_status()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _on_update_status(self, _=None) -> None:
- """Handler for the update-status event."""
- try:
- self._validate_config()
- self._check_relations()
- check_container_ready(self.container)
- if self.debug_mode.started:
- return
- check_service_active(self.container, self.service_name)
- self.unit.status = ActiveStatus()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _on_required_relation_broken(self, _) -> None:
- """Handler for required relation-broken events."""
- try:
- check_container_ready(self.container)
- check_service_active(self.container, self.service_name)
-            self.container.stop(self.service_name)
- except CharmError:
- pass
- self._on_update_status()
-
- def _on_get_debug_mode_information_action(self, event: ActionEvent) -> None:
- """Handler for the get-debug-mode-information action event."""
- if not self.debug_mode.started:
- event.fail(
- f"debug-mode has not started. Hint: juju config {self.app.name} debug-mode=true"
- )
- return
-
- debug_info = {"command": self.debug_mode.command, "password": self.debug_mode.password}
- event.set_results(debug_info)
-
- # ---------------------------------------------------------------------------
- # Validation, configuration and more
- # ---------------------------------------------------------------------------
-
- def _validate_config(self) -> None:
- """Validate charm configuration.
-
- Raises:
- CharmError: if charm configuration is invalid.
- """
- logger.debug("validating charm config")
- if self.config["log-level"].upper() not in [
- "TRACE",
- "DEBUG",
- "INFO",
- "WARN",
- "ERROR",
- "FATAL",
- ]:
- raise CharmError("invalid value for log-level option")
-
- def _observe_charm_events(self) -> None:
- event_handler_mapping = {
- # Core lifecycle events
- self.on.lcm_pebble_ready: self._on_config_changed,
- self.on.config_changed: self._on_config_changed,
- self.on.update_status: self._on_update_status,
- # Relation events
- self.on.kafka_available: self._on_config_changed,
- self.on["kafka"].relation_broken: self._on_required_relation_broken,
- self.mongodb_client.on.database_created: self._on_config_changed,
- self.on["mongodb"].relation_broken: self._on_required_relation_broken,
- self.on["ro"].relation_changed: self._on_config_changed,
- self.on["ro"].relation_broken: self._on_required_relation_broken,
- self.on.vca_data_changed: self._on_config_changed,
- self.on["vca"].relation_broken: self._on_config_changed,
- # Action events
- self.on.get_debug_mode_information_action: self._on_get_debug_mode_information_action,
- }
- for event, handler in event_handler_mapping.items():
- self.framework.observe(event, handler)
-
- def _check_relations(self) -> None:
- """Validate charm relations.
-
- Raises:
- CharmError: if charm configuration is invalid.
- """
- logger.debug("check for missing relations")
- missing_relations = []
-
- if not self.kafka.host or not self.kafka.port:
- missing_relations.append("kafka")
- if not self._is_database_available():
- missing_relations.append("mongodb")
- if not self.ro.host or not self.ro.port:
- missing_relations.append("ro")
-
- if missing_relations:
- relations_str = ", ".join(missing_relations)
- one_relation_missing = len(missing_relations) == 1
- error_msg = f'need {relations_str} relation{"" if one_relation_missing else "s"}'
- logger.warning(error_msg)
- raise CharmError(error_msg)
-
- def _is_database_available(self) -> bool:
- try:
- return self.mongodb_client.is_resource_created()
- except KeyError:
- return False
-
- def _configure_service(self, container: Container) -> None:
- """Add Pebble layer with the lcm service."""
- logger.debug(f"configuring {self.app.name} service")
- container.add_layer("lcm", self._get_layer(), combine=True)
- container.replan()
-
- def _get_layer(self) -> Dict[str, Any]:
- """Get layer for Pebble."""
- environments = {
- # General configuration
- "OSMLCM_GLOBAL_LOGLEVEL": self.config["log-level"].upper(),
- # Kafka configuration
- "OSMLCM_MESSAGE_DRIVER": "kafka",
- "OSMLCM_MESSAGE_HOST": self.kafka.host,
- "OSMLCM_MESSAGE_PORT": self.kafka.port,
- # RO configuration
- "OSMLCM_RO_HOST": self.ro.host,
- "OSMLCM_RO_PORT": self.ro.port,
- "OSMLCM_RO_TENANT": "osm",
- # Database configuration
- "OSMLCM_DATABASE_DRIVER": "mongo",
- "OSMLCM_DATABASE_URI": self._get_mongodb_uri(),
- "OSMLCM_DATABASE_COMMONKEY": self.config["database-commonkey"],
- # Storage configuration
- "OSMLCM_STORAGE_DRIVER": "mongo",
- "OSMLCM_STORAGE_PATH": "/app/storage",
- "OSMLCM_STORAGE_COLLECTION": "files",
- "OSMLCM_STORAGE_URI": self._get_mongodb_uri(),
- "OSMLCM_VCA_HELM_CA_CERTS": self.config["helm-ca-certs"],
- "OSMLCM_VCA_STABLEREPOURL": self.config["helm-stable-repo-url"],
- }
- # Vca configuration
- if self.vca.data:
- environments["OSMLCM_VCA_ENDPOINTS"] = self.vca.data.endpoints
- environments["OSMLCM_VCA_USER"] = self.vca.data.user
- environments["OSMLCM_VCA_PUBKEY"] = self.vca.data.public_key
- environments["OSMLCM_VCA_SECRET"] = self.vca.data.secret
- environments["OSMLCM_VCA_CACERT"] = self.vca.data.cacert
- if self.vca.data.lxd_cloud:
- environments["OSMLCM_VCA_CLOUD"] = self.vca.data.lxd_cloud
-
- if self.vca.data.k8s_cloud:
- environments["OSMLCM_VCA_K8S_CLOUD"] = self.vca.data.k8s_cloud
- for key, value in self.vca.data.model_configs.items():
- env_name = f'OSMLCM_VCA_MODEL_CONFIG_{key.upper().replace("-","_")}'
- environments[env_name] = value
-
- layer_config = {
- "summary": "lcm layer",
- "description": "pebble config layer for nbi",
- "services": {
- self.service_name: {
- "override": "replace",
- "summary": "lcm service",
- "command": "python3 -m osm_lcm.lcm",
- "startup": "enabled",
- "user": "appuser",
- "group": "appuser",
- "environment": environments,
- }
- },
- }
- return layer_config
-
- def _get_mongodb_uri(self):
- return list(self.mongodb_client.fetch_relation_data().values())[0]["uris"]
-
-
-if __name__ == "__main__": # pragma: no cover
- main(OsmLcmCharm)
diff --git a/installers/charm/osm-lcm/src/legacy_interfaces.py b/installers/charm/osm-lcm/src/legacy_interfaces.py
deleted file mode 100644
index d56f31df..00000000
--- a/installers/charm/osm-lcm/src/legacy_interfaces.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# flake8: noqa
-
-import ops
-
-
-class BaseRelationClient(ops.framework.Object):
- """Requires side of a Kafka Endpoint"""
-
- def __init__(
- self, charm: ops.charm.CharmBase, relation_name: str, mandatory_fields: list = []
- ):
- super().__init__(charm, relation_name)
- self.relation_name = relation_name
- self.mandatory_fields = mandatory_fields
- self._update_relation()
-
- def get_data_from_unit(self, key: str):
- if not self.relation:
-            # Refreshing the relation here should not be needed, because the constructor
-            # (which already calls _update_relation) runs on every hook. It is kept for the
-            # unit tests, where update_relation_data does not re-instantiate the charm and
-            # the cached relation data would otherwise be empty.
- self._update_relation()
- if self.relation:
- for unit in self.relation.units:
- data = self.relation.data[unit].get(key)
- if data:
- return data
-
- def get_data_from_app(self, key: str):
- if not self.relation or self.relation.app not in self.relation.data:
-            # Refreshing the relation here should not be needed, because the constructor
-            # (which already calls _update_relation) runs on every hook. It is kept for the
-            # unit tests, where update_relation_data does not re-instantiate the charm and
-            # the cached relation data would otherwise be empty.
- self._update_relation()
- if self.relation and self.relation.app in self.relation.data:
- data = self.relation.data[self.relation.app].get(key)
- if data:
- return data
-
- def is_missing_data_in_unit(self):
- return not all([self.get_data_from_unit(field) for field in self.mandatory_fields])
-
- def is_missing_data_in_app(self):
- return not all([self.get_data_from_app(field) for field in self.mandatory_fields])
-
- def _update_relation(self):
- self.relation = self.framework.model.get_relation(self.relation_name)
-
-
-class MongoClient(BaseRelationClient):
- """Requires side of a Mongo Endpoint"""
-
- mandatory_fields_mapping = {
- "reactive": ["connection_string"],
- "ops": ["replica_set_uri", "replica_set_name"],
- }
-
- def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
- super().__init__(charm, relation_name, mandatory_fields=[])
-
- @property
- def connection_string(self):
- if self.is_opts():
- replica_set_uri = self.get_data_from_unit("replica_set_uri")
- replica_set_name = self.get_data_from_unit("replica_set_name")
- return f"{replica_set_uri}?replicaSet={replica_set_name}"
- else:
- return self.get_data_from_unit("connection_string")
-
- def is_opts(self):
- return not self.is_missing_data_in_unit_ops()
-
- def is_missing_data_in_unit(self):
- return self.is_missing_data_in_unit_ops() and self.is_missing_data_in_unit_reactive()
-
- def is_missing_data_in_unit_ops(self):
- return not all(
- [self.get_data_from_unit(field) for field in self.mandatory_fields_mapping["ops"]]
- )
-
- def is_missing_data_in_unit_reactive(self):
- return not all(
- [self.get_data_from_unit(field) for field in self.mandatory_fields_mapping["reactive"]]
- )
diff --git a/installers/charm/osm-lcm/tests/integration/test_charm.py b/installers/charm/osm-lcm/tests/integration/test_charm.py
deleted file mode 100644
index 00bb2603..00000000
--- a/installers/charm/osm-lcm/tests/integration/test_charm.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# Learn more about testing at: https://juju.is/docs/sdk/testing
-
-import asyncio
-import logging
-import shlex
-from pathlib import Path
-
-import pytest
-import yaml
-from pytest_operator.plugin import OpsTest
-
-logger = logging.getLogger(__name__)
-
-METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
-LCM_APP = METADATA["name"]
-KAFKA_CHARM = "kafka-k8s"
-KAFKA_APP = "kafka"
-MONGO_DB_CHARM = "mongodb-k8s"
-MONGO_DB_APP = "mongodb"
-RO_CHARM = "osm-ro"
-RO_APP = "ro"
-ZOOKEEPER_CHARM = "zookeeper-k8s"
-ZOOKEEPER_APP = "zookeeper"
-VCA_CHARM = "osm-vca-integrator"
-VCA_APP = "vca"
-APPS = [KAFKA_APP, MONGO_DB_APP, ZOOKEEPER_APP, RO_APP, LCM_APP]
-
-
-@pytest.mark.abort_on_fail
-async def test_lcm_is_deployed(ops_test: OpsTest):
- charm = await ops_test.build_charm(".")
- resources = {"lcm-image": METADATA["resources"]["lcm-image"]["upstream-source"]}
- ro_deploy_cmd = f"juju deploy {RO_CHARM} {RO_APP} --resource ro-image=opensourcemano/ro:testing-daily --channel=latest/beta --series=jammy"
-
- await asyncio.gather(
- ops_test.model.deploy(
- charm, resources=resources, application_name=LCM_APP, series="jammy"
- ),
-        # The RO charm has to be deployed through the Juju CLI because of
-        # https://github.com/juju/python-libjuju/issues/822, which makes
-        # python-libjuju deploy a different charm than the CLI does.
- ops_test.run(*shlex.split(ro_deploy_cmd), check=True),
- ops_test.model.deploy(KAFKA_CHARM, application_name=KAFKA_APP, channel="stable"),
- ops_test.model.deploy(MONGO_DB_CHARM, application_name=MONGO_DB_APP, channel="5/edge"),
- ops_test.model.deploy(ZOOKEEPER_CHARM, application_name=ZOOKEEPER_APP, channel="stable"),
- )
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- timeout=300,
- )
- assert ops_test.model.applications[LCM_APP].status == "blocked"
- unit = ops_test.model.applications[LCM_APP].units[0]
- assert unit.workload_status_message == "need kafka, mongodb, ro relations"
-
- logger.info("Adding relations for other components")
- await ops_test.model.add_relation(KAFKA_APP, ZOOKEEPER_APP)
- await ops_test.model.add_relation(
- "{}:mongodb".format(RO_APP), "{}:database".format(MONGO_DB_APP)
- )
- await ops_test.model.add_relation(RO_APP, KAFKA_APP)
-
- logger.info("Adding relations for LCM")
- await ops_test.model.add_relation(
- "{}:mongodb".format(LCM_APP), "{}:database".format(MONGO_DB_APP)
- )
- await ops_test.model.add_relation(LCM_APP, KAFKA_APP)
- await ops_test.model.add_relation(LCM_APP, RO_APP)
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- timeout=300,
- )
-
-
-@pytest.mark.abort_on_fail
-async def test_lcm_scales_up(ops_test: OpsTest):
- logger.info("Scaling up osm-lcm")
- expected_units = 3
- assert len(ops_test.model.applications[LCM_APP].units) == 1
- await ops_test.model.applications[LCM_APP].scale(expected_units)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=[LCM_APP], status="active", timeout=1000, wait_for_exact_units=expected_units
- )
-
-
-@pytest.mark.abort_on_fail
-@pytest.mark.parametrize("relation_to_remove", [RO_APP, KAFKA_APP, MONGO_DB_APP])
-async def test_lcm_blocks_without_relation(ops_test: OpsTest, relation_to_remove):
- logger.info("Removing relation: %s", relation_to_remove)
-    # The MongoDB charm's relation endpoint is named "database"
- local_relation = relation_to_remove
- if relation_to_remove == MONGO_DB_APP:
- local_relation = "database"
- await asyncio.gather(
- ops_test.model.applications[relation_to_remove].remove_relation(local_relation, LCM_APP)
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[LCM_APP])
- assert ops_test.model.applications[LCM_APP].status == "blocked"
- for unit in ops_test.model.applications[LCM_APP].units:
- assert unit.workload_status_message == f"need {relation_to_remove} relation"
- await ops_test.model.add_relation(LCM_APP, relation_to_remove)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- timeout=300,
- )
-
-
-@pytest.mark.abort_on_fail
-async def test_lcm_action_debug_mode_disabled(ops_test: OpsTest):
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- timeout=300,
- )
- logger.info("Running action 'get-debug-mode-information'")
- action = (
- await ops_test.model.applications[LCM_APP]
- .units[0]
- .run_action("get-debug-mode-information")
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[LCM_APP])
- status = await ops_test.model.get_action_status(uuid_or_prefix=action.entity_id)
- assert status[action.entity_id] == "failed"
-
-
-@pytest.mark.abort_on_fail
-async def test_lcm_action_debug_mode_enabled(ops_test: OpsTest):
- await ops_test.model.applications[LCM_APP].set_config({"debug-mode": "true"})
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- timeout=1000,
- )
- logger.info("Running action 'get-debug-mode-information'")
- # list of units is not ordered
- unit_id = list(
- filter(
- lambda x: (x.entity_id == f"{LCM_APP}/0"), ops_test.model.applications[LCM_APP].units
- )
- )[0]
- action = await unit_id.run_action("get-debug-mode-information")
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[LCM_APP])
- status = await ops_test.model.get_action_status(uuid_or_prefix=action.entity_id)
- message = await ops_test.model.get_action_output(action_uuid=action.entity_id)
- assert status[action.entity_id] == "completed"
- assert "command" in message
- assert "password" in message
-
-
-@pytest.mark.abort_on_fail
-async def test_lcm_integration_vca(ops_test: OpsTest):
- await asyncio.gather(
- ops_test.model.deploy(
- VCA_CHARM, application_name=VCA_APP, channel="latest/beta", series="jammy"
- ),
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=[VCA_APP],
- timeout=300,
- )
- controllers = (Path.home() / ".local/share/juju/controllers.yaml").read_text()
- accounts = (Path.home() / ".local/share/juju/accounts.yaml").read_text()
- public_key = (Path.home() / ".local/share/juju/ssh/juju_id_rsa.pub").read_text()
- await ops_test.model.applications[VCA_APP].set_config(
- {
- "controllers": controllers,
- "accounts": accounts,
- "public-key": public_key,
- "k8s-cloud": "microk8s",
- }
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS + [VCA_APP],
- status="active",
- timeout=1000,
- )
- await ops_test.model.add_relation(LCM_APP, VCA_APP)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS + [VCA_APP],
- status="active",
- timeout=300,
- )
diff --git a/installers/charm/osm-lcm/tests/unit/test_charm.py b/installers/charm/osm-lcm/tests/unit/test_charm.py
deleted file mode 100644
index 41cfb007..00000000
--- a/installers/charm/osm-lcm/tests/unit/test_charm.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# Learn more about testing at: https://juju.is/docs/sdk/testing
-
-import pytest
-from ops.model import ActiveStatus, BlockedStatus
-from ops.testing import Harness
-from pytest_mock import MockerFixture
-
-from charm import CharmError, OsmLcmCharm, check_service_active
-
-container_name = "lcm"
-service_name = "lcm"
-
-
-@pytest.fixture
-def harness(mocker: MockerFixture):
- harness = Harness(OsmLcmCharm)
- harness.begin()
- harness.container_pebble_ready(container_name)
- yield harness
- harness.cleanup()
-
-
-def test_missing_relations(harness: Harness):
- harness.charm.on.config_changed.emit()
- assert type(harness.charm.unit.status) == BlockedStatus
- assert all(
- relation in harness.charm.unit.status.message for relation in ["mongodb", "kafka", "ro"]
- )
-
-
-def test_ready(harness: Harness):
- _add_relations(harness)
- assert harness.charm.unit.status == ActiveStatus()
-
-
-def test_container_stops_after_relation_broken(harness: Harness):
- harness.charm.on[container_name].pebble_ready.emit(container_name)
- container = harness.charm.unit.get_container(container_name)
- relation_ids = _add_relations(harness)
- check_service_active(container, service_name)
- harness.remove_relation(relation_ids[0])
- with pytest.raises(CharmError):
- check_service_active(container, service_name)
-
-
-def _add_relations(harness: Harness):
- relation_ids = []
- # Add mongo relation
- relation_id = harness.add_relation("mongodb", "mongodb")
- harness.add_relation_unit(relation_id, "mongodb/0")
- harness.update_relation_data(
- relation_id,
- "mongodb",
- {"uris": "mongodb://:1234", "username": "user", "password": "password"},
- )
- relation_ids.append(relation_id)
- # Add kafka relation
- relation_id = harness.add_relation("kafka", "kafka")
- harness.add_relation_unit(relation_id, "kafka/0")
- harness.update_relation_data(relation_id, "kafka", {"host": "kafka", "port": "9092"})
- relation_ids.append(relation_id)
- # Add ro relation
- relation_id = harness.add_relation("ro", "ro")
- harness.add_relation_unit(relation_id, "ro/0")
- harness.update_relation_data(relation_id, "ro", {"host": "ro", "port": "9090"})
- relation_ids.append(relation_id)
- return relation_ids
diff --git a/installers/charm/osm-lcm/tox.ini b/installers/charm/osm-lcm/tox.ini
deleted file mode 100644
index 2d95eca6..00000000
--- a/installers/charm/osm-lcm/tox.ini
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-[tox]
-skipsdist=True
-skip_missing_interpreters = True
-envlist = lint, unit, integration
-
-[vars]
-src_path = {toxinidir}/src/
-tst_path = {toxinidir}/tests/
-all_path = {[vars]src_path} {[vars]tst_path}
-
-[testenv]
-basepython = python3.8
-setenv =
- PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path}
- PYTHONBREAKPOINT=ipdb.set_trace
- PY_COLORS=1
-passenv =
- PYTHONPATH
- CHARM_BUILD_DIR
- MODEL_SETTINGS
-
-[testenv:fmt]
-description = Apply coding style standards to code
-deps =
- black
- isort
-commands =
- isort {[vars]all_path}
- black {[vars]all_path}
-
-[testenv:lint]
-description = Check code against coding style standards
-deps =
- black
- flake8
- flake8-docstrings
- flake8-builtins
- pyproject-flake8
- pep8-naming
- isort
- codespell
-commands =
- codespell {toxinidir} --skip {toxinidir}/.git --skip {toxinidir}/.tox \
- --skip {toxinidir}/build --skip {toxinidir}/lib --skip {toxinidir}/venv \
- --skip {toxinidir}/.mypy_cache --skip {toxinidir}/icon.svg
- # pflake8 wrapper supports config from pyproject.toml
- pflake8 {[vars]all_path}
- isort --check-only --diff {[vars]all_path}
- black --check --diff {[vars]all_path}
-
-[testenv:unit]
-description = Run unit tests
-deps =
- pytest
- pytest-mock
- coverage[toml]
- -r{toxinidir}/requirements.txt
-commands =
- coverage run --source={[vars]src_path} \
- -m pytest --ignore={[vars]tst_path}integration -v --tb native -s {posargs}
- coverage report
- coverage xml
-
-[testenv:integration]
-description = Run integration tests
-deps =
- pytest
- juju<3
- pytest-operator
- -r{toxinidir}/requirements.txt
-commands =
- pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} --cloud microk8s
diff --git a/installers/charm/osm-mon/.gitignore b/installers/charm/osm-mon/.gitignore
deleted file mode 100644
index 87d0a587..00000000
--- a/installers/charm/osm-mon/.gitignore
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-venv/
-build/
-*.charm
-.tox/
-.coverage
-coverage.xml
-__pycache__/
-*.py[cod]
-.vscode
\ No newline at end of file
diff --git a/installers/charm/osm-mon/.jujuignore b/installers/charm/osm-mon/.jujuignore
deleted file mode 100644
index 17c7a8bb..00000000
--- a/installers/charm/osm-mon/.jujuignore
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-/venv
-*.py[cod]
-*.charm
diff --git a/installers/charm/osm-mon/CONTRIBUTING.md b/installers/charm/osm-mon/CONTRIBUTING.md
deleted file mode 100644
index 1ade9b30..00000000
--- a/installers/charm/osm-mon/CONTRIBUTING.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-# Contributing
-
-## Overview
-
-This document explains the processes and practices recommended for contributing enhancements to
-this operator.
-
-- Generally, before developing enhancements to this charm, you should consider
-  [opening an issue](https://osm.etsi.org/bugzilla/enter_bug.cgi?product=OSM)
-  explaining your use case (Component=devops, version=master).
-- If you would like to chat with us about your use-cases or proposed implementation, you can reach
- us at [OSM Juju public channel](https://opensourcemano.slack.com/archives/C027KJGPECA).
-- Familiarising yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) library
- will help you a lot when working on new features or bug fixes.
-- All enhancements require review before being merged. Code review typically examines
- - code quality
- - test coverage
-  - user experience for Juju administrators of this charm.
-- Please help us keep branches easy to review by rebasing your Gerrit patch onto
-  the `master` branch.
-
-## Developing
-
-You can use the environments created by `tox` for development:
-
-```shell
-tox --notest -e unit
-source .tox/unit/bin/activate
-```
-
-### Testing
-
-```shell
-tox -e fmt # update your code according to linting rules
-tox -e lint # code style
-tox -e unit # unit tests
-tox -e integration # integration tests
-tox # runs 'lint' and 'unit' environments
-```
-
-## Build charm
-
-Build the charm in this git repository using:
-
-```shell
-charmcraft pack
-```
-
-### Deploy
-
-```bash
-# Create a model
-juju add-model dev
-# Enable DEBUG logging
-juju model-config logging-config="=INFO;unit=DEBUG"
-# Deploy the charm
-juju deploy ./osm-mon_ubuntu-22.04-amd64.charm \
- --resource mon-image=opensourcemano/mon:testing-daily --series jammy
-```
diff --git a/installers/charm/osm-mon/LICENSE b/installers/charm/osm-mon/LICENSE
deleted file mode 100644
index 7e9d5046..00000000
--- a/installers/charm/osm-mon/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2022 Canonical Ltd.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/installers/charm/osm-mon/README.md b/installers/charm/osm-mon/README.md
deleted file mode 100644
index 8d4eb22a..00000000
--- a/installers/charm/osm-mon/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-
-
-# OSM MON
-
-Charmhub package name: osm-mon
-More information: https://charmhub.io/osm-mon
-
-## Other resources
-
-* [Read more](https://osm.etsi.org/docs/user-guide/latest/)
-
-* [Contributing](https://osm.etsi.org/gitweb/?p=osm/devops.git;a=blob;f=installers/charm/osm-mon/CONTRIBUTING.md)
-
-* See the [Juju SDK documentation](https://juju.is/docs/sdk) for more information about developing and improving charms.
-
diff --git a/installers/charm/osm-mon/actions.yaml b/installers/charm/osm-mon/actions.yaml
deleted file mode 100644
index 0d73468f..00000000
--- a/installers/charm/osm-mon/actions.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Actions tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-get-debug-mode-information:
- description: Get information to debug the container
diff --git a/installers/charm/osm-mon/charmcraft.yaml b/installers/charm/osm-mon/charmcraft.yaml
deleted file mode 100644
index f5e3ff37..00000000
--- a/installers/charm/osm-mon/charmcraft.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-
-type: charm
-bases:
- - build-on:
- - name: "ubuntu"
- channel: "22.04"
- run-on:
- - name: "ubuntu"
- channel: "22.04"
-
-parts:
- charm:
- # build-packages:
- # - git
- prime:
- - files/*
diff --git a/installers/charm/osm-mon/config.yaml b/installers/charm/osm-mon/config.yaml
deleted file mode 100644
index cb2eb99c..00000000
--- a/installers/charm/osm-mon/config.yaml
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Configure tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-options:
- log-level:
- default: "INFO"
- description: |
- Set the Logging Level.
-
- Options:
- - TRACE
- - DEBUG
- - INFO
- - WARN
- - ERROR
- - FATAL
- type: string
- database-commonkey:
- description: Database COMMON KEY
- type: string
- default: osm
- openstack-default-granularity:
- description: Openstack default granularity
- type: int
- default: 300
- global-request-timeout:
- description: Global request timeout
- type: int
- default: 10
- collector-interval:
- description: Collector interval
- type: int
- default: 30
- evaluator-interval:
- description: Evaluator interval
- type: int
- default: 30
- grafana-url:
- description: Grafana URL
- type: string
- default: http://grafana:3000
- grafana-user:
- description: Grafana user
- type: string
- default: admin
- grafana-password:
- description: Grafana password
- type: string
- default: admin
- keystone-enabled:
- description: MON will use Keystone backend
- type: boolean
- default: false
- vm-infra-metrics:
- description: Enables querying the VIMs asking for the status of the VMs
- type: boolean
- default: true
- certificates:
- type: string
- description: |
-      comma-separated list of name:content certificates.
- Where:
- name: name of the file for the certificate
- content: base64 content of the certificate
- The path for the files is /certs.
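-
-      Example (illustrative file name and placeholder content):
-        $ juju config mon certificates="ca.crt:<base64-content>"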
-
- # Debug-mode options
- debug-mode:
- type: boolean
- description: |
- Great for OSM Developers! (Not recommended for production deployments)
-
-      This option activates the Debug Mode, which sets up the container to be ready for debugging.
- As part of the setup, SSH is enabled and a VSCode workspace file is automatically populated.
-
- After enabling the debug-mode, execute the following command to get the information you need
- to start debugging:
- `juju run-action get-debug-mode-information --wait`
-
- The previous command returns the command you need to execute, and the SSH password that was set.
-
- See also:
- - https://charmhub.io/osm-mon/configure#mon-hostpath
- - https://charmhub.io/osm-mon/configure#common-hostpath
- - https://charmhub.io/osm-mon/configure#n2vc-hostpath
- default: false
- mon-hostpath:
- type: string
- description: |
- Set this config to the local path of the MON module to persist the changes done during the
- debug-mode session.
-
- Example:
- $ git clone "https://osm.etsi.org/gerrit/osm/MON" /home/ubuntu/MON
- $ juju config mon mon-hostpath=/home/ubuntu/MON
-
- This configuration only applies if option `debug-mode` is set to true.
- common-hostpath:
- type: string
- description: |
- Set this config to the local path of the common module to persist the changes done during the
- debug-mode session.
-
- Example:
- $ git clone "https://osm.etsi.org/gerrit/osm/common" /home/ubuntu/common
- $ juju config mon common-hostpath=/home/ubuntu/common
-
- This configuration only applies if option `debug-mode` is set to true.
- n2vc-hostpath:
- type: string
- description: |
- Set this config to the local path of the N2VC module to persist the changes done during the
- debug-mode session.
-
- Example:
- $ git clone "https://osm.etsi.org/gerrit/osm/N2VC" /home/ubuntu/N2VC
- $ juju config mon n2vc-hostpath=/home/ubuntu/N2VC
-
- This configuration only applies if option `debug-mode` is set to true.
diff --git a/installers/charm/osm-mon/files/vscode-workspace.json b/installers/charm/osm-mon/files/vscode-workspace.json
deleted file mode 100644
index 34c77180..00000000
--- a/installers/charm/osm-mon/files/vscode-workspace.json
+++ /dev/null
@@ -1,49 +0,0 @@
-{
- "folders": [
- {"path": "/usr/lib/python3/dist-packages/osm_mon"},
- {"path": "/usr/lib/python3/dist-packages/osm_common"},
- {"path": "/usr/lib/python3/dist-packages/n2vc"},
- ],
- "settings": {},
- "launch": {
- "version": "0.2.0",
- "configurations": [
- {
- "name": "MON",
- "type": "python",
- "request": "launch",
- "module": "osm_mon.nbi",
- "justMyCode": false,
-            },
-
- {
- "name": "MON Server",
- "type": "python",
- "request": "launch",
- "module": "osm_mon.cmd.mon_server",
- "justMyCode": false,
- },
- {
- "name": "MON evaluator",
- "type": "python",
- "request": "launch",
- "module": "osm_mon.cmd.mon_evaluator",
- "justMyCode": false,
- },
- {
- "name": "MON collector",
- "type": "python",
- "request": "launch",
- "module": "osm_mon.cmd.mon_collector",
- "justMyCode": false,
- },
- {
- "name": "MON dashboarder",
- "type": "python",
- "request": "launch",
- "module": "osm_mon.cmd.mon_dashboarder",
- "justMyCode": false,
- },
- ],
- }
-}
\ No newline at end of file
diff --git a/installers/charm/osm-mon/lib/charms/data_platform_libs/v0/data_interfaces.py b/installers/charm/osm-mon/lib/charms/data_platform_libs/v0/data_interfaces.py
deleted file mode 100644
index b3da5aa4..00000000
--- a/installers/charm/osm-mon/lib/charms/data_platform_libs/v0/data_interfaces.py
+++ /dev/null
@@ -1,1130 +0,0 @@
-# Copyright 2023 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Library to manage the relation for the data-platform products.
-
-This library contains the Requires and Provides classes for handling the relation
-between an application and the multiple managed applications supported by the data team:
-MySQL, PostgreSQL, MongoDB, Redis, and Kafka.
-
-### Database (MySQL, Postgresql, MongoDB, and Redis)
-
-#### Requires Charm
-This library is a uniform interface to a selection of common database
-metadata, with custom events that make database management more convenient,
-and methods to consume the application-related data.
-
-
-The following is an example of using the DatabaseCreatedEvent in the context of the
-application charm code:
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- DatabaseCreatedEvent,
- DatabaseRequires,
-)
-
-class ApplicationCharm(CharmBase):
- # Application charm that connects to database charms.
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Charm events defined in the database requires charm library.
- self.database = DatabaseRequires(self, relation_name="database", database_name="database")
- self.framework.observe(self.database.on.database_created, self._on_database_created)
-
- def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
-
- # Start application with rendered configuration
- self._start_application(config_file)
-
- # Set active status
- self.unit.status = ActiveStatus("received database credentials")
-```
-
-As shown above, the library provides some custom events to handle specific situations,
-which are listed below:
-
-- database_created: event emitted when the requested database is created.
-- endpoints_changed: event emitted when the read/write endpoints of the database have changed.
-- read_only_endpoints_changed: event emitted when the read-only endpoints of the database
- have changed. Event is not triggered if read/write endpoints changed too.
-
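-For instance, the endpoints_changed event can be handled in the same way. The sketch
-below follows the pattern of the example above; the handler name and the configuration
-helper it calls are illustrative, not part of this library:
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import DatabaseRequires
-
-class ApplicationCharm(CharmBase):
-    # Application charm that reacts to endpoint changes.
-
-    def __init__(self, *args):
-        super().__init__(*args)
-        self.database = DatabaseRequires(self, relation_name="database", database_name="database")
-        self.framework.observe(self.database.on.endpoints_changed, self._on_endpoints_changed)
-
-    def _on_endpoints_changed(self, event) -> None:
-        # Re-render the application configuration with the new read/write endpoints
-        # (illustrative helper) and restart the workload.
-        self._update_app_config(endpoints=event.endpoints)
-```
-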
-If multiple database clusters need to be connected to the same relation endpoint,
-the application charm can implement the same code as if it were connecting to only
-one database cluster (as in the code example above).
-
-To differentiate multiple clusters connected to the same relation endpoint
-the application charm can use the name of the remote application:
-
-```python
-
-def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Get the remote app name of the cluster that triggered this event
- cluster = event.relation.app.name
-```
-
-It is also possible to provide an alias for each different database cluster/relation.
-
-So, it is possible to differentiate the clusters in two ways.
-The first is to use the remote application name, i.e., `event.relation.app.name`, as above.
-
-The second way is to use a different event handler for each cluster's events.
-The implementation would be something like the following code:
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- DatabaseCreatedEvent,
- DatabaseRequires,
-)
-
-class ApplicationCharm(CharmBase):
- # Application charm that connects to database charms.
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Define the cluster aliases and one handler for each cluster database created event.
- self.database = DatabaseRequires(
- self,
- relation_name="database",
- database_name="database",
- relations_aliases = ["cluster1", "cluster2"],
- )
- self.framework.observe(
- self.database.on.cluster1_database_created, self._on_cluster1_database_created
- )
- self.framework.observe(
- self.database.on.cluster2_database_created, self._on_cluster2_database_created
- )
-
- def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database on the cluster named cluster1
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
- ...
-
- def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database on the cluster named cluster2
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
- ...
-
-```
-
-### Provider Charm
-
-The following is an example of using the DatabaseRequestedEvent in the context of the
-database charm code:
-
-```python
-from charms.data_platform_libs.v0.data_interfaces import DatabaseProvides
-
-class SampleCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- # Charm events defined in the database provides charm library.
- self.provided_database = DatabaseProvides(self, relation_name="database")
- self.framework.observe(self.provided_database.on.database_requested,
- self._on_database_requested)
- # Database generic helper
- self.database = DatabaseHelper()
-
- def _on_database_requested(self, event: DatabaseRequestedEvent) -> None:
- # Handle the event triggered by a new database requested in the relation
- # Retrieve the database name using the charm library.
- db_name = event.database
- # generate a new user credential
- username = self.database.generate_user()
- password = self.database.generate_password()
- # set the credentials for the relation
- self.provided_database.set_credentials(event.relation.id, username, password)
- # set other variables for the relation event.set_tls("False")
-```
-As shown above, the library provides a custom event (database_requested) to handle
-the situation when an application charm requests a new database to be created.
-It is preferred to subscribe to this event instead of the relation-changed event, to
-avoid creating a new database when information other than the database name is
-exchanged in the relation databag.
-
-### Kafka
-
-This library is the interface for interacting with the Kafka charm. It contains
-custom events that make managing Kafka more convenient, and provides methods to consume
-the application-related data.
-
-#### Requirer Charm
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- BootstrapServerChangedEvent,
- KafkaRequires,
- TopicCreatedEvent,
-)
-
-class ApplicationCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- self.kafka = KafkaRequires(self, "kafka_client", "test-topic")
- self.framework.observe(
- self.kafka.on.bootstrap_server_changed, self._on_kafka_bootstrap_server_changed
- )
- self.framework.observe(
- self.kafka.on.topic_created, self._on_kafka_topic_created
- )
-
- def _on_kafka_bootstrap_server_changed(self, event: BootstrapServerChangedEvent):
- # Event triggered when a bootstrap server was changed for this application
-
- new_bootstrap_server = event.bootstrap_server
- ...
-
- def _on_kafka_topic_created(self, event: TopicCreatedEvent):
- # Event triggered when a topic was created for this application
- username = event.username
- password = event.password
- tls = event.tls
- tls_ca= event.tls_ca
-        bootstrap_server = event.bootstrap_server
-        consumer_group_prefix = event.consumer_group_prefix
- zookeeper_uris = event.zookeeper_uris
- ...
-
-```
-
-As shown above, the library provides some custom events to handle specific situations,
-which are listed below:
-
-- topic_created: event emitted when the requested topic is created.
-- bootstrap_server_changed: event emitted when the bootstrap server has changed.
-- credential_changed: event emitted when the Kafka credentials have changed.
-
-### Provider Charm
-
-Following the previous example, here is the corresponding provider charm.
-
-```python
-from charms.data_platform_libs.v0.data_interfaces import (
-    KafkaProvides,
-    TopicRequestedEvent,
-)
-
-class SampleCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Default charm events.
- self.framework.observe(self.on.start, self._on_start)
-
- # Charm events defined in the Kafka Provides charm library.
- self.kafka_provider = KafkaProvides(self, relation_name="kafka_client")
- self.framework.observe(self.kafka_provider.on.topic_requested, self._on_topic_requested)
- # Kafka generic helper
- self.kafka = KafkaHelper()
-
- def _on_topic_requested(self, event: TopicRequestedEvent):
- # Handle the on_topic_requested event.
-
- topic = event.topic
- relation_id = event.relation.id
-        # generate new user credentials (illustrative generic helper methods)
-        username = self.kafka.generate_user()
-        password = self.kafka.generate_password()
-        # set connection info in the relation databag
-        self.kafka_provider.set_bootstrap_server(relation_id, self.kafka.get_bootstrap_server())
-        self.kafka_provider.set_credentials(relation_id, username=username, password=password)
- self.kafka_provider.set_consumer_group_prefix(relation_id, ...)
- self.kafka_provider.set_tls(relation_id, "False")
- self.kafka_provider.set_zookeeper_uris(relation_id, ...)
-
-```
-As shown above, the library provides a custom event (topic_requested) to handle
-the situation in which an application charm requests a new topic to be created.
-Subscribing to this event is preferred over observing the relation-changed event,
-as it avoids creating a new topic when information other than the topic name is
-exchanged in the relation databag.
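-
-On the requirer side, `is_resource_created()` can be used outside an event callback to
-check whether the Kafka charm has already shared credentials over the relation. A minimal,
-illustrative sketch (reusing `self.kafka` from the requirer example above):
-
-```python
-    def _is_kafka_ready(self) -> bool:
-        # True once username and password are present in the relation databag
-        return self.kafka.is_resource_created()
-```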
-"""
-
-import json
-import logging
-from abc import ABC, abstractmethod
-from collections import namedtuple
-from datetime import datetime
-from typing import List, Optional
-
-from ops.charm import (
- CharmBase,
- CharmEvents,
- RelationChangedEvent,
- RelationEvent,
- RelationJoinedEvent,
-)
-from ops.framework import EventSource, Object
-from ops.model import Relation
-
-# The unique Charmhub library identifier, never change it
-LIBID = "6c3e6b6680d64e9c89e611d1a15f65be"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 7
-
-PYDEPS = ["ops>=2.0.0"]
-
-logger = logging.getLogger(__name__)
-
-Diff = namedtuple("Diff", "added changed deleted")
-Diff.__doc__ = """
-A tuple for storing the diff between two data mappings.
-
-added - keys that were added
-changed - keys that still exist but have new values
-deleted - keys that were deleted"""
-
-
-def diff(event: RelationChangedEvent, bucket: str) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
- bucket: bucket of the databag (app or unit)
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- # Retrieve the old data from the data key in the application relation databag.
- old_data = json.loads(event.relation.data[bucket].get("data", "{}"))
- # Retrieve the new data from the event relation databag.
- new_data = {
- key: value for key, value in event.relation.data[event.app].items() if key != "data"
- }
-
- # These are the keys that were added to the databag and triggered this event.
- added = new_data.keys() - old_data.keys()
- # These are the keys that were removed from the databag and triggered this event.
- deleted = old_data.keys() - new_data.keys()
- # These are the keys that already existed in the databag,
- # but had their values changed.
- changed = {key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key]}
- # Convert the new_data to a serializable format and save it for a next diff check.
- event.relation.data[bucket].update({"data": json.dumps(new_data)})
-
- # Return the diff with all possible changes.
- return Diff(added, changed, deleted)
-
-
-# Base DataProvides and DataRequires
-
-
-class DataProvides(Object, ABC):
- """Base provides-side of the data products relation."""
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
- self.charm = charm
- self.local_app = self.charm.model.app
- self.local_unit = self.charm.unit
- self.relation_name = relation_name
- self.framework.observe(
- charm.on[relation_name].relation_changed,
- self._on_relation_changed,
- )
-
- def _diff(self, event: RelationChangedEvent) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- return diff(event, self.local_app)
-
- @abstractmethod
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation data has changed."""
- raise NotImplementedError
-
- def fetch_relation_data(self) -> dict:
- """Retrieves data from relation.
-
- This function can be used to retrieve data from a relation
- in the charm code when outside an event callback.
-
- Returns:
- a dict of the values stored in the relation data bag
- for all relation instances (indexed by the relation id).
- """
- data = {}
- for relation in self.relations:
- data[relation.id] = {
- key: value for key, value in relation.data[relation.app].items() if key != "data"
- }
- return data
-
- def _update_relation_data(self, relation_id: int, data: dict) -> None:
- """Updates a set of key-value pairs in the relation.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- data: dict containing the key-value pairs
- that should be updated in the relation.
- """
- if self.local_unit.is_leader():
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_app].update(data)
-
- @property
- def relations(self) -> List[Relation]:
- """The list of Relation instances associated with this relation_name."""
- return list(self.charm.model.relations[self.relation_name])
-
- def set_credentials(self, relation_id: int, username: str, password: str) -> None:
- """Set credentials.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- username: user that was created.
- password: password of the created user.
- """
- self._update_relation_data(
- relation_id,
- {
- "username": username,
- "password": password,
- },
- )
-
- def set_tls(self, relation_id: int, tls: str) -> None:
- """Set whether TLS is enabled.
-
- Args:
- relation_id: the identifier for a particular relation.
- tls: whether tls is enabled (True or False).
- """
- self._update_relation_data(relation_id, {"tls": tls})
-
- def set_tls_ca(self, relation_id: int, tls_ca: str) -> None:
- """Set the TLS CA in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- tls_ca: TLS certification authority.
- """
- self._update_relation_data(relation_id, {"tls_ca": tls_ca})
-
-
-class DataRequires(Object, ABC):
- """Requires-side of the relation."""
-
- def __init__(
- self,
- charm,
- relation_name: str,
- extra_user_roles: str = None,
- ):
- """Manager of base client relations."""
- super().__init__(charm, relation_name)
- self.charm = charm
- self.extra_user_roles = extra_user_roles
- self.local_app = self.charm.model.app
- self.local_unit = self.charm.unit
- self.relation_name = relation_name
- self.framework.observe(
- self.charm.on[relation_name].relation_joined, self._on_relation_joined_event
- )
- self.framework.observe(
- self.charm.on[relation_name].relation_changed, self._on_relation_changed_event
- )
-
- @abstractmethod
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the relation."""
- raise NotImplementedError
-
- @abstractmethod
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- raise NotImplementedError
-
- def fetch_relation_data(self) -> dict:
- """Retrieves data from relation.
-
- This function can be used to retrieve data from a relation
- in the charm code when outside an event callback.
- Function cannot be used in `*-relation-broken` events and will raise an exception.
-
- Returns:
- a dict of the values stored in the relation data bag
- for all relation instances (indexed by the relation ID).
- """
- data = {}
- for relation in self.relations:
- data[relation.id] = {
- key: value for key, value in relation.data[relation.app].items() if key != "data"
- }
- return data
-
- def _update_relation_data(self, relation_id: int, data: dict) -> None:
- """Updates a set of key-value pairs in the relation.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- data: dict containing the key-value pairs
- that should be updated in the relation.
- """
- if self.local_unit.is_leader():
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_app].update(data)
-
- def _diff(self, event: RelationChangedEvent) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- return diff(event, self.local_unit)
-
- @property
- def relations(self) -> List[Relation]:
- """The list of Relation instances associated with this relation_name."""
- return [
- relation
- for relation in self.charm.model.relations[self.relation_name]
- if self._is_relation_active(relation)
- ]
-
- @staticmethod
- def _is_relation_active(relation: Relation):
- try:
- _ = repr(relation.data)
- return True
- except RuntimeError:
- return False
-
- @staticmethod
- def _is_resource_created_for_relation(relation: Relation):
- return (
- "username" in relation.data[relation.app] and "password" in relation.data[relation.app]
- )
-
- def is_resource_created(self, relation_id: Optional[int] = None) -> bool:
- """Check if the resource has been created.
-
- This function can be used to check if the Provider answered with data in the charm code
- when outside an event callback.
-
- Args:
- relation_id (int, optional): When provided the check is done only for the relation id
- provided, otherwise the check is done for all relations
-
- Returns:
- True or False
-
- Raises:
- IndexError: If relation_id is provided but that relation does not exist
- """
- if relation_id is not None:
- try:
- relation = [relation for relation in self.relations if relation.id == relation_id][
- 0
- ]
- return self._is_resource_created_for_relation(relation)
- except IndexError:
- raise IndexError(f"relation id {relation_id} cannot be accessed")
- else:
- return (
- all(
- [
- self._is_resource_created_for_relation(relation)
- for relation in self.relations
- ]
- )
- if self.relations
- else False
- )
-
-
-# General events
-
-
-class ExtraRoleEvent(RelationEvent):
- """Base class for data events."""
-
- @property
- def extra_user_roles(self) -> Optional[str]:
- """Returns the extra user roles that were requested."""
- return self.relation.data[self.relation.app].get("extra-user-roles")
-
-
-class AuthenticationEvent(RelationEvent):
- """Base class for authentication fields for events."""
-
- @property
- def username(self) -> Optional[str]:
- """Returns the created username."""
- return self.relation.data[self.relation.app].get("username")
-
- @property
- def password(self) -> Optional[str]:
- """Returns the password for the created user."""
- return self.relation.data[self.relation.app].get("password")
-
- @property
- def tls(self) -> Optional[str]:
- """Returns whether TLS is configured."""
- return self.relation.data[self.relation.app].get("tls")
-
- @property
- def tls_ca(self) -> Optional[str]:
- """Returns TLS CA."""
- return self.relation.data[self.relation.app].get("tls-ca")
-
-
-# Database related events and fields
-
-
-class DatabaseProvidesEvent(RelationEvent):
- """Base class for database events."""
-
- @property
- def database(self) -> Optional[str]:
- """Returns the database that was requested."""
- return self.relation.data[self.relation.app].get("database")
-
-
-class DatabaseRequestedEvent(DatabaseProvidesEvent, ExtraRoleEvent):
- """Event emitted when a new database is requested for use on this relation."""
-
-
-class DatabaseProvidesEvents(CharmEvents):
- """Database events.
-
- This class defines the events that the database can emit.
- """
-
- database_requested = EventSource(DatabaseRequestedEvent)
-
-
-class DatabaseRequiresEvent(RelationEvent):
- """Base class for database events."""
-
- @property
- def endpoints(self) -> Optional[str]:
- """Returns a comma separated list of read/write endpoints."""
- return self.relation.data[self.relation.app].get("endpoints")
-
- @property
- def read_only_endpoints(self) -> Optional[str]:
- """Returns a comma separated list of read only endpoints."""
- return self.relation.data[self.relation.app].get("read-only-endpoints")
-
- @property
- def replset(self) -> Optional[str]:
- """Returns the replicaset name.
-
- MongoDB only.
- """
- return self.relation.data[self.relation.app].get("replset")
-
- @property
- def uris(self) -> Optional[str]:
- """Returns the connection URIs.
-
- MongoDB, Redis, OpenSearch.
- """
- return self.relation.data[self.relation.app].get("uris")
-
- @property
- def version(self) -> Optional[str]:
- """Returns the version of the database.
-
- Version as informed by the database daemon.
- """
- return self.relation.data[self.relation.app].get("version")
-
-
-class DatabaseCreatedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when a new database is created for use on this relation."""
-
-
-class DatabaseEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when the read/write endpoints are changed."""
-
-
-class DatabaseReadOnlyEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when the read only endpoints are changed."""
-
-
-class DatabaseRequiresEvents(CharmEvents):
- """Database events.
-
- This class defines the events that the database can emit.
- """
-
- database_created = EventSource(DatabaseCreatedEvent)
- endpoints_changed = EventSource(DatabaseEndpointsChangedEvent)
- read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent)
-
-
-# Database Provider and Requires
-
-
-class DatabaseProvides(DataProvides):
- """Provider-side of the database relations."""
-
- on = DatabaseProvidesEvents()
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
-
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation has changed."""
- # Only the leader should handle this event.
- if not self.local_unit.is_leader():
- return
-
-        # Check which data has changed to emit custom events.
- diff = self._diff(event)
-
- # Emit a database requested event if the setup key (database name and optional
- # extra user roles) was added to the relation databag by the application.
- if "database" in diff.added:
- self.on.database_requested.emit(event.relation, app=event.app, unit=event.unit)
-
- def set_endpoints(self, relation_id: int, connection_strings: str) -> None:
- """Set database primary connections.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
-            connection_strings: comma-separated list of database hosts and ports.
- """
- self._update_relation_data(relation_id, {"endpoints": connection_strings})
-
- def set_read_only_endpoints(self, relation_id: int, connection_strings: str) -> None:
- """Set database replicas connection strings.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
-            connection_strings: comma-separated list of database hosts and ports.
- """
- self._update_relation_data(relation_id, {"read-only-endpoints": connection_strings})
-
- def set_replset(self, relation_id: int, replset: str) -> None:
- """Set replica set name in the application relation databag.
-
- MongoDB only.
-
- Args:
- relation_id: the identifier for a particular relation.
- replset: replica set name.
- """
- self._update_relation_data(relation_id, {"replset": replset})
-
- def set_uris(self, relation_id: int, uris: str) -> None:
- """Set the database connection URIs in the application relation databag.
-
- MongoDB, Redis, and OpenSearch only.
-
- Args:
- relation_id: the identifier for a particular relation.
- uris: connection URIs.
- """
- self._update_relation_data(relation_id, {"uris": uris})
-
- def set_version(self, relation_id: int, version: str) -> None:
- """Set the database version in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- version: database version.
- """
- self._update_relation_data(relation_id, {"version": version})
-
-
-class DatabaseRequires(DataRequires):
- """Requires-side of the database relation."""
-
- on = DatabaseRequiresEvents()
-
- def __init__(
- self,
- charm,
- relation_name: str,
- database_name: str,
- extra_user_roles: str = None,
- relations_aliases: List[str] = None,
- ):
- """Manager of database client relations."""
- super().__init__(charm, relation_name, extra_user_roles)
- self.database = database_name
- self.relations_aliases = relations_aliases
-
- # Define custom event names for each alias.
- if relations_aliases:
-            # Ensure the number of aliases matches the maximum number
-            # of connections allowed in the specific relation.
- relation_connection_limit = self.charm.meta.requires[relation_name].limit
- if len(relations_aliases) != relation_connection_limit:
- raise ValueError(
- f"The number of aliases must match the maximum number of connections allowed in the relation. "
- f"Expected {relation_connection_limit}, got {len(relations_aliases)}"
- )
-
- for relation_alias in relations_aliases:
- self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent)
- self.on.define_event(
- f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent
- )
- self.on.define_event(
- f"{relation_alias}_read_only_endpoints_changed",
- DatabaseReadOnlyEndpointsChangedEvent,
- )
-
- def _assign_relation_alias(self, relation_id: int) -> None:
- """Assigns an alias to a relation.
-
- This function writes in the unit data bag.
-
- Args:
- relation_id: the identifier for a particular relation.
- """
- # If no aliases were provided, return immediately.
- if not self.relations_aliases:
- return
-
- # Return if an alias was already assigned to this relation
- # (like when there are more than one unit joining the relation).
- if (
- self.charm.model.get_relation(self.relation_name, relation_id)
- .data[self.local_unit]
- .get("alias")
- ):
- return
-
- # Retrieve the available aliases (the ones that weren't assigned to any relation).
- available_aliases = self.relations_aliases[:]
- for relation in self.charm.model.relations[self.relation_name]:
- alias = relation.data[self.local_unit].get("alias")
- if alias:
- logger.debug("Alias %s was already assigned to relation %d", alias, relation.id)
- available_aliases.remove(alias)
-
- # Set the alias in the unit relation databag of the specific relation.
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_unit].update({"alias": available_aliases[0]})
-
- def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None:
- """Emit an aliased event to a particular relation if it has an alias.
-
- Args:
- event: the relation changed event that was received.
- event_name: the name of the event to emit.
- """
- alias = self._get_relation_alias(event.relation.id)
- if alias:
- getattr(self.on, f"{alias}_{event_name}").emit(
- event.relation, app=event.app, unit=event.unit
- )
-
- def _get_relation_alias(self, relation_id: int) -> Optional[str]:
- """Returns the relation alias.
-
- Args:
- relation_id: the identifier for a particular relation.
-
- Returns:
- the relation alias or None if the relation was not found.
- """
- for relation in self.charm.model.relations[self.relation_name]:
- if relation.id == relation_id:
- return relation.data[self.local_unit].get("alias")
- return None
-
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the database relation."""
- # If relations aliases were provided, assign one to the relation.
- self._assign_relation_alias(event.relation.id)
-
- # Sets both database and extra user roles in the relation
- # if the roles are provided. Otherwise, sets only the database.
- if self.extra_user_roles:
- self._update_relation_data(
- event.relation.id,
- {
- "database": self.database,
- "extra-user-roles": self.extra_user_roles,
- },
- )
- else:
- self._update_relation_data(event.relation.id, {"database": self.database})
-
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- """Event emitted when the database relation has changed."""
-        # Check which data has changed to emit custom events.
- diff = self._diff(event)
-
- # Check if the database is created
- # (the database charm shared the credentials).
- if "username" in diff.added and "password" in diff.added:
- # Emit the default event (the one without an alias).
- logger.info("database created at %s", datetime.now())
- self.on.database_created.emit(event.relation, app=event.app, unit=event.unit)
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "database_created")
-
- # To avoid unnecessary application restarts do not trigger
-            # "endpoints_changed" event if "database_created" is triggered.
- return
-
- # Emit an endpoints changed event if the database
- # added or changed this info in the relation databag.
- if "endpoints" in diff.added or "endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("endpoints changed on %s", datetime.now())
- self.on.endpoints_changed.emit(event.relation, app=event.app, unit=event.unit)
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "endpoints_changed")
-
- # To avoid unnecessary application restarts do not trigger
-            # "read_only_endpoints_changed" event if "endpoints_changed" is triggered.
- return
-
- # Emit a read only endpoints changed event if the database
- # added or changed this info in the relation databag.
- if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("read-only-endpoints changed on %s", datetime.now())
- self.on.read_only_endpoints_changed.emit(
- event.relation, app=event.app, unit=event.unit
- )
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "read_only_endpoints_changed")
-
-
-# Kafka related events
-
-
-class KafkaProvidesEvent(RelationEvent):
- """Base class for Kafka events."""
-
- @property
- def topic(self) -> Optional[str]:
- """Returns the topic that was requested."""
- return self.relation.data[self.relation.app].get("topic")
-
-
-class TopicRequestedEvent(KafkaProvidesEvent, ExtraRoleEvent):
- """Event emitted when a new topic is requested for use on this relation."""
-
-
-class KafkaProvidesEvents(CharmEvents):
- """Kafka events.
-
-    This class defines the events that Kafka can emit.
- """
-
- topic_requested = EventSource(TopicRequestedEvent)
-
-
-class KafkaRequiresEvent(RelationEvent):
- """Base class for Kafka events."""
-
- @property
- def bootstrap_server(self) -> Optional[str]:
-        """Returns a comma-separated list of broker URIs."""
- return self.relation.data[self.relation.app].get("endpoints")
-
- @property
- def consumer_group_prefix(self) -> Optional[str]:
- """Returns the consumer-group-prefix."""
- return self.relation.data[self.relation.app].get("consumer-group-prefix")
-
- @property
- def zookeeper_uris(self) -> Optional[str]:
- """Returns a comma separated list of Zookeeper uris."""
- return self.relation.data[self.relation.app].get("zookeeper-uris")
-
-
-class TopicCreatedEvent(AuthenticationEvent, KafkaRequiresEvent):
- """Event emitted when a new topic is created for use on this relation."""
-
-
-class BootstrapServerChangedEvent(AuthenticationEvent, KafkaRequiresEvent):
- """Event emitted when the bootstrap server is changed."""
-
-
-class KafkaRequiresEvents(CharmEvents):
- """Kafka events.
-
-    This class defines the events that Kafka can emit.
- """
-
- topic_created = EventSource(TopicCreatedEvent)
- bootstrap_server_changed = EventSource(BootstrapServerChangedEvent)
-
-
-# Kafka Provides and Requires
-
-
-class KafkaProvides(DataProvides):
- """Provider-side of the Kafka relation."""
-
- on = KafkaProvidesEvents()
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
-
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation has changed."""
- # Only the leader should handle this event.
- if not self.local_unit.is_leader():
- return
-
-        # Check which data has changed to emit custom events.
- diff = self._diff(event)
-
- # Emit a topic requested event if the setup key (topic name and optional
- # extra user roles) was added to the relation databag by the application.
- if "topic" in diff.added:
- self.on.topic_requested.emit(event.relation, app=event.app, unit=event.unit)
-
- def set_bootstrap_server(self, relation_id: int, bootstrap_server: str) -> None:
- """Set the bootstrap server in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- bootstrap_server: the bootstrap server address.
- """
- self._update_relation_data(relation_id, {"endpoints": bootstrap_server})
-
- def set_consumer_group_prefix(self, relation_id: int, consumer_group_prefix: str) -> None:
- """Set the consumer group prefix in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- consumer_group_prefix: the consumer group prefix string.
- """
- self._update_relation_data(relation_id, {"consumer-group-prefix": consumer_group_prefix})
-
- def set_zookeeper_uris(self, relation_id: int, zookeeper_uris: str) -> None:
- """Set the zookeeper uris in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
-            zookeeper_uris: comma-separated list of ZooKeeper server URIs.
- """
- self._update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris})
-
-
-class KafkaRequires(DataRequires):
- """Requires-side of the Kafka relation."""
-
- on = KafkaRequiresEvents()
-
- def __init__(self, charm, relation_name: str, topic: str, extra_user_roles: str = None):
- """Manager of Kafka client relations."""
- # super().__init__(charm, relation_name)
- super().__init__(charm, relation_name, extra_user_roles)
- self.charm = charm
- self.topic = topic
-
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the Kafka relation."""
- # Sets both topic and extra user roles in the relation
- # if the roles are provided. Otherwise, sets only the topic.
- self._update_relation_data(
- event.relation.id,
- {
- "topic": self.topic,
- "extra-user-roles": self.extra_user_roles,
- }
- if self.extra_user_roles is not None
- else {"topic": self.topic},
- )
-
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- """Event emitted when the Kafka relation has changed."""
-        # Check which data has changed to emit custom events.
- diff = self._diff(event)
-
- # Check if the topic is created
- # (the Kafka charm shared the credentials).
- if "username" in diff.added and "password" in diff.added:
- # Emit the default event (the one without an alias).
- logger.info("topic created at %s", datetime.now())
- self.on.topic_created.emit(event.relation, app=event.app, unit=event.unit)
-
- # To avoid unnecessary application restarts do not trigger
-            # "endpoints_changed" event if "topic_created" is triggered.
- return
-
-        # Emit an endpoints (bootstrap-server) changed event if the Kafka charm
- # added or changed this info in the relation databag.
- if "endpoints" in diff.added or "endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("endpoints changed on %s", datetime.now())
- self.on.bootstrap_server_changed.emit(
- event.relation, app=event.app, unit=event.unit
- ) # here check if this is the right design
- return
diff --git a/installers/charm/osm-mon/lib/charms/kafka_k8s/v0/kafka.py b/installers/charm/osm-mon/lib/charms/kafka_k8s/v0/kafka.py
deleted file mode 100644
index aeb5edcb..00000000
--- a/installers/charm/osm-mon/lib/charms/kafka_k8s/v0/kafka.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Kafka library.
-
-This [library](https://juju.is/docs/sdk/libraries) implements both sides of the
-`kafka` [interface](https://juju.is/docs/sdk/relations).
-
-The *provider* side of this interface is implemented by the
-[kafka-k8s Charmed Operator](https://charmhub.io/kafka-k8s).
-
-Any Charmed Operator that *requires* Kafka for providing its
-service should implement the *requirer* side of this interface.
-
-In a nutshell, using this library to implement a Charmed Operator *requiring*
-Kafka would look like this:
-
-```
-$ charmcraft fetch-lib charms.kafka_k8s.v0.kafka
-```
-
-`metadata.yaml`:
-
-```
-requires:
- kafka:
- interface: kafka
- limit: 1
-```
-
-`src/charm.py`:
-
-```
-from charms.kafka_k8s.v0.kafka import KafkaEvents, KafkaRequires
-from ops.charm import CharmBase
-
-
-class MyCharm(CharmBase):
-
- on = KafkaEvents()
-
- def __init__(self, *args):
- super().__init__(*args)
- self.kafka = KafkaRequires(self)
- self.framework.observe(
- self.on.kafka_available,
- self._on_kafka_available,
- )
- self.framework.observe(
- self.on["kafka"].relation_broken,
- self._on_kafka_broken,
- )
-
- def _on_kafka_available(self, event):
- # Get Kafka host and port
- host: str = self.kafka.host
- port: int = self.kafka.port
- # host => "kafka-k8s"
- # port => 9092
-
- def _on_kafka_broken(self, event):
- # Stop service
- # ...
- self.unit.status = BlockedStatus("need kafka relation")
-```
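-
-For reference, the *provider* side (implemented by the kafka-k8s Charmed Operator)
-publishes the host and port with `KafkaProvides.set_host_info`. A minimal, illustrative
-sketch (the charm class and the host/port values are assumptions for the example):
-
-```
-from charms.kafka_k8s.v0.kafka import KafkaProvides
-from ops.charm import CharmBase
-
-
-class KafkaCharm(CharmBase):
-
-    def __init__(self, *args):
-        super().__init__(*args)
-        self.kafka = KafkaProvides(self)
-        self.framework.observe(
-            self.on["kafka"].relation_joined,
-            self._on_kafka_relation_joined,
-        )
-
-    def _on_kafka_relation_joined(self, event):
-        # set_host_info writes to the application databag, so only the leader may call it
-        if self.unit.is_leader():
-            self.kafka.set_host_info("kafka-k8s", 9092, event.relation)
-```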
-
-You can file bugs
-[here](https://github.com/charmed-osm/kafka-k8s-operator/issues)!
-"""
-
-from typing import Optional
-
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, Object
-
-# The unique Charmhub library identifier, never change it
-from ops.model import Relation
-
-LIBID = "eacc8c85082347c9aae740e0220b8376"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 4
-
-
-KAFKA_HOST_APP_KEY = "host"
-KAFKA_PORT_APP_KEY = "port"
-
-
-class _KafkaAvailableEvent(EventBase):
- """Event emitted when Kafka is available."""
-
-
-class KafkaEvents(CharmEvents):
- """Kafka events.
-
- This class defines the events that Kafka can emit.
-
- Events:
- kafka_available (_KafkaAvailableEvent)
- """
-
- kafka_available = EventSource(_KafkaAvailableEvent)
-
-
-class KafkaRequires(Object):
- """Requires-side of the Kafka relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "kafka") -> None:
- super().__init__(charm, endpoint_name)
- self.charm = charm
- self._endpoint_name = endpoint_name
-
- # Observe relation events
- event_observe_mapping = {
- charm.on[self._endpoint_name].relation_changed: self._on_relation_changed,
- }
- for event, observer in event_observe_mapping.items():
- self.framework.observe(event, observer)
-
- def _on_relation_changed(self, event) -> None:
- if event.relation.app and all(
- key in event.relation.data[event.relation.app]
- for key in (KAFKA_HOST_APP_KEY, KAFKA_PORT_APP_KEY)
- ):
- self.charm.on.kafka_available.emit()
-
- @property
- def host(self) -> str:
- """Get kafka hostname."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- relation.data[relation.app].get(KAFKA_HOST_APP_KEY)
- if relation and relation.app
- else None
- )
-
- @property
- def port(self) -> int:
- """Get kafka port number."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- int(relation.data[relation.app].get(KAFKA_PORT_APP_KEY))
- if relation and relation.app
- else None
- )
-
-
-class KafkaProvides(Object):
- """Provides-side of the Kafka relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "kafka") -> None:
- super().__init__(charm, endpoint_name)
- self._endpoint_name = endpoint_name
-
- def set_host_info(self, host: str, port: int, relation: Optional[Relation] = None) -> None:
- """Set Kafka host and port.
-
- This function writes in the application data of the relation, therefore,
- only the unit leader can call it.
-
- Args:
- host (str): Kafka hostname or IP address.
- port (int): Kafka port.
- relation (Optional[Relation]): Relation to update.
- If not specified, all relations will be updated.
-
- Raises:
- Exception: if a non-leader unit calls this function.
- """
- if not self.model.unit.is_leader():
-            raise Exception("only the leader can set host information.")
-
- if relation:
- self._update_relation_data(host, port, relation)
- return
-
- for relation in self.model.relations[self._endpoint_name]:
- self._update_relation_data(host, port, relation)
-
- def _update_relation_data(self, host: str, port: int, relation: Relation) -> None:
- """Update data in relation if needed."""
- relation.data[self.model.app][KAFKA_HOST_APP_KEY] = host
- relation.data[self.model.app][KAFKA_PORT_APP_KEY] = str(port)
diff --git a/installers/charm/osm-mon/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/installers/charm/osm-mon/lib/charms/observability_libs/v1/kubernetes_service_patch.py
deleted file mode 100644
index 506dbf03..00000000
--- a/installers/charm/osm-mon/lib/charms/observability_libs/v1/kubernetes_service_patch.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-
-"""# KubernetesServicePatch Library.
-
-This library is designed to enable developers to more simply patch the Kubernetes Service created
-by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a
-service named after the application in the namespace (named after the Juju model). This service by
-default contains a "placeholder" port, which is 65535/TCP.
-
-When modifying the default set of resources managed by Juju, one must consider the lifecycle of the
-charm. In this case, any modifications to the default service (created during deployment)
-will be overwritten during a charm upgrade.
-
-When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm`
-events which applies the patch to the cluster. This should ensure that the service ports are
-correct throughout the charm's life.
-
-The constructor simply takes a reference to the parent charm, and a list of
-[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the
-service. For information regarding the `lightkube` `ServicePort` model, please visit the
-`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport).
-
-Optionally, a name of the service (in case service name needs to be patched as well), labels,
-selectors, and annotations can be provided as keyword arguments.
-
-## Getting Started
-
-To get started using the library, you just need to fetch the library using `charmcraft`. **Note
-that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.**
-
-```shell
-cd some-charm
-charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch
-echo <<-EOF >> requirements.txt
-lightkube
-lightkube-models
-EOF
-```
-
-Then, to initialise the library:
-
-For `ClusterIP` services:
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- port = ServicePort(443, name=f"{self.app.name}")
- self.service_patcher = KubernetesServicePatch(self, [port])
- # ...
-```
-
-For `LoadBalancer`/`NodePort` services:
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666)
- self.service_patcher = KubernetesServicePatch(
- self, [port], "LoadBalancer"
- )
- # ...
-```
-
-Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"`.
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP")
- udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP")
- sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP")
- self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp])
- # ...
-```
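-
-Custom service names, labels, selectors, and annotations can also be passed as keyword
-arguments, as described above. For instance (the names and values below are purely
-illustrative):
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
-    def __init__(self, *args):
-        # ...
-        port = ServicePort(443, name=f"{self.app.name}")
-        self.service_patcher = KubernetesServicePatch(
-            self,
-            [port],
-            service_name="custom-service-name",
-            additional_labels={"app.kubernetes.io/part-of": self.app.name},
-            additional_annotations={"prometheus.io/scrape": "true"},
-        )
-        # ...
-```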
-
-Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library
-does not try to make any API calls, or open any files during testing that are unlikely to be
-present, and could break your tests. The easiest way to do this is during your test `setUp`:
-
-```python
-# ...
-
-@patch("charm.KubernetesServicePatch", lambda x, y: None)
-def setUp(self, *unused):
- self.harness = Harness(SomeCharm)
- # ...
-```
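-
-The library also exposes an `is_patched()` method, which reports whether the expected ports
-are already present on the Kubernetes service. A small, illustrative sketch (the logger and
-the handler are assumptions for the example):
-
-```python
-# ...
-    def _on_update_status(self, _):
-        if not self.service_patcher.is_patched():
-            # the patch is re-applied on the next install or upgrade-charm event
-            logger.warning("the Kubernetes service has not been patched yet")
-```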
-"""
-
-import logging
-from types import MethodType
-from typing import List, Literal
-
-from lightkube import ApiError, Client
-from lightkube.models.core_v1 import ServicePort, ServiceSpec
-from lightkube.models.meta_v1 import ObjectMeta
-from lightkube.resources.core_v1 import Service
-from lightkube.types import PatchType
-from ops.charm import CharmBase
-from ops.framework import Object
-
-logger = logging.getLogger(__name__)
-
-# The unique Charmhub library identifier, never change it
-LIBID = "0042f86d0a874435adef581806cddbbb"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 1
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 1
-
-ServiceType = Literal["ClusterIP", "LoadBalancer"]
-
-
-class KubernetesServicePatch(Object):
- """A utility for patching the Kubernetes service set up by Juju."""
-
- def __init__(
- self,
- charm: CharmBase,
- ports: List[ServicePort],
- service_name: str = None,
- service_type: ServiceType = "ClusterIP",
- additional_labels: dict = None,
- additional_selectors: dict = None,
- additional_annotations: dict = None,
- ):
- """Constructor for KubernetesServicePatch.
-
- Args:
- charm: the charm that is instantiating the library.
- ports: a list of ServicePorts
- service_name: allows setting custom name to the patched service. If none given,
- application name will be used.
- service_type: desired type of K8s service. Default value is in line with ServiceSpec's
- default value.
- additional_labels: Labels to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_selectors: Selectors to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_annotations: Annotations to be added to the kubernetes service.
- """
- super().__init__(charm, "kubernetes-service-patch")
- self.charm = charm
- self.service_name = service_name if service_name else self._app
- self.service = self._service_object(
- ports,
- service_name,
- service_type,
- additional_labels,
- additional_selectors,
- additional_annotations,
- )
-
- # Make mypy type checking happy that self._patch is a method
- assert isinstance(self._patch, MethodType)
- # Ensure this patch is applied during the 'install' and 'upgrade-charm' events
- self.framework.observe(charm.on.install, self._patch)
- self.framework.observe(charm.on.upgrade_charm, self._patch)
-
- def _service_object(
- self,
- ports: List[ServicePort],
- service_name: str = None,
- service_type: ServiceType = "ClusterIP",
- additional_labels: dict = None,
- additional_selectors: dict = None,
- additional_annotations: dict = None,
- ) -> Service:
- """Creates a valid Service representation.
-
- Args:
- ports: a list of ServicePorts
- service_name: allows setting custom name to the patched service. If none given,
- application name will be used.
- service_type: desired type of K8s service. Default value is in line with ServiceSpec's
- default value.
- additional_labels: Labels to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_selectors: Selectors to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_annotations: Annotations to be added to the kubernetes service.
-
- Returns:
- Service: A valid representation of a Kubernetes Service with the correct ports.
- """
- if not service_name:
- service_name = self._app
- labels = {"app.kubernetes.io/name": self._app}
- if additional_labels:
- labels.update(additional_labels)
- selector = {"app.kubernetes.io/name": self._app}
- if additional_selectors:
- selector.update(additional_selectors)
- return Service(
- apiVersion="v1",
- kind="Service",
- metadata=ObjectMeta(
- namespace=self._namespace,
- name=service_name,
- labels=labels,
- annotations=additional_annotations, # type: ignore[arg-type]
- ),
- spec=ServiceSpec(
- selector=selector,
- ports=ports,
- type=service_type,
- ),
- )
-
- def _patch(self, _) -> None:
- """Patch the Kubernetes service created by Juju to map the correct port.
-
- Raises:
- PatchFailed: if patching fails due to lack of permissions, or otherwise.
- """
- if not self.charm.unit.is_leader():
- return
-
- client = Client()
- try:
- if self.service_name != self._app:
- self._delete_and_create_service(client)
- client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE)
- except ApiError as e:
- if e.status.code == 403:
- logger.error("Kubernetes service patch failed: `juju trust` this application.")
- else:
- logger.error("Kubernetes service patch failed: %s", str(e))
- else:
- logger.info("Kubernetes service '%s' patched successfully", self._app)
-
- def _delete_and_create_service(self, client: Client):
- service = client.get(Service, self._app, namespace=self._namespace)
- service.metadata.name = self.service_name # type: ignore[attr-defined]
- service.metadata.resourceVersion = service.metadata.uid = None # type: ignore[attr-defined] # noqa: E501
- client.delete(Service, self._app, namespace=self._namespace)
- client.create(service)
-
- def is_patched(self) -> bool:
- """Reports if the service patch has been applied.
-
- Returns:
- bool: A boolean indicating if the service patch has been applied.
- """
- client = Client()
- # Get the relevant service from the cluster
- service = client.get(Service, name=self.service_name, namespace=self._namespace)
- # Construct a list of expected ports, should the patch be applied
- expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]
- # Construct a list in the same manner, using the fetched service
- fetched_ports = [(p.port, p.targetPort) for p in service.spec.ports] # type: ignore[attr-defined] # noqa: E501
- return expected_ports == fetched_ports
-
- @property
- def _app(self) -> str:
- """Name of the current Juju application.
-
- Returns:
- str: A string containing the name of the current Juju application.
- """
- return self.charm.app.name
-
- @property
- def _namespace(self) -> str:
- """The Kubernetes namespace we're running in.
-
- Returns:
- str: A string containing the name of the current Kubernetes namespace.
- """
- with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
- return f.read().strip()
diff --git a/installers/charm/osm-mon/lib/charms/osm_libs/v0/utils.py b/installers/charm/osm-mon/lib/charms/osm_libs/v0/utils.py
deleted file mode 100644
index d739ba68..00000000
--- a/installers/charm/osm-mon/lib/charms/osm_libs/v0/utils.py
+++ /dev/null
@@ -1,544 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-"""OSM Utils Library.
-
-This library offers some utilities made for but not limited to Charmed OSM.
-
-# Getting started
-
-Execute the following command inside your Charmed Operator folder to fetch the library.
-
-```shell
-charmcraft fetch-lib charms.osm_libs.v0.utils
-```
-
-# CharmError Exception
-
-An exception that takes two arguments, the message and a StatusBase class, which are useful
-for setting the status of the charm when the exception is raised.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import CharmError
-
-class MyCharm(CharmBase):
- def _on_config_changed(self, _):
- try:
- if not self.config.get("some-option"):
- raise CharmError("need some-option", BlockedStatus)
-
- if not self.mysql_ready:
- raise CharmError("waiting for mysql", WaitingStatus)
-
- # Do stuff...
-
-        except CharmError as e:
- self.unit.status = e.status
-```
-
-# Pebble validations
-
-The `check_container_ready` function checks that a container is ready,
-and therefore Pebble is ready.
-
-The `check_service_active` function checks that a service in a container is running.
-
-Both functions raise a CharmError if the validations fail.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import check_container_ready, check_service_active
-
-class MyCharm(CharmBase):
- def _on_config_changed(self, _):
- try:
- container: Container = self.unit.get_container("my-container")
- check_container_ready(container)
- check_service_active(container, "my-service")
- # Do stuff...
-
-        except CharmError as e:
- self.unit.status = e.status
-```
-
-# Debug-mode
-
-The debug-mode allows OSM developers to easily debug OSM modules.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import DebugMode
-
-class MyCharm(CharmBase):
- _stored = StoredState()
-
- def __init__(self, _):
- # ...
- container: Container = self.unit.get_container("my-container")
- hostpaths = [
- HostPath(
- config="module-hostpath",
- container_path="/usr/lib/python3/dist-packages/module"
- ),
- ]
- vscode_workspace_path = "files/vscode-workspace.json"
- self.debug_mode = DebugMode(
- self,
- self._stored,
- container,
- hostpaths,
- vscode_workspace_path,
- )
-
- def _on_update_status(self, _):
- if self.debug_mode.started:
- return
- # ...
-
- def _get_debug_mode_information(self):
- command = self.debug_mode.command
- password = self.debug_mode.password
- return command, password
-```
-
-# More
-
-- Get pod IP with `get_pod_ip()`
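-
-Example (the returned address shown in the comment is illustrative):
-
-```python
-from charms.osm_libs.v0.utils import get_pod_ip
-
-class MyCharm(CharmBase):
-    def _on_update_status(self, _):
-        pod_ip = get_pod_ip()  # e.g. "10.1.23.4"
-```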
-"""
-from dataclasses import dataclass
-import logging
-import secrets
-import socket
-from pathlib import Path
-from typing import List
-
-from lightkube import Client
-from lightkube.models.core_v1 import HostPathVolumeSource, Volume, VolumeMount
-from lightkube.resources.apps_v1 import StatefulSet
-from ops.charm import CharmBase
-from ops.framework import Object, StoredState
-from ops.model import (
- ActiveStatus,
- BlockedStatus,
- Container,
- MaintenanceStatus,
- StatusBase,
- WaitingStatus,
-)
-from ops.pebble import ServiceStatus
-
-# The unique Charmhub library identifier, never change it
-LIBID = "e915908eebee4cdd972d484728adf984"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 5
-
-logger = logging.getLogger(__name__)
-
-
-class CharmError(Exception):
- """Charm Error Exception."""
-
- def __init__(self, message: str, status_class: StatusBase = BlockedStatus) -> None:
- self.message = message
- self.status_class = status_class
- self.status = status_class(message)
-
-
-def check_container_ready(container: Container) -> None:
- """Check Pebble has started in the container.
-
- Args:
- container (Container): Container to be checked.
-
- Raises:
- CharmError: if container is not ready.
- """
- if not container.can_connect():
- raise CharmError("waiting for pebble to start", MaintenanceStatus)
-
-
-def check_service_active(container: Container, service_name: str) -> None:
- """Check if the service is running.
-
- Args:
- container (Container): Container to be checked.
- service_name (str): Name of the service to check.
-
- Raises:
- CharmError: if the service is not running.
- """
- if service_name not in container.get_plan().services:
- raise CharmError(f"{service_name} service not configured yet", WaitingStatus)
-
- if container.get_service(service_name).current != ServiceStatus.ACTIVE:
- raise CharmError(f"{service_name} service is not running")
-
-
-def get_pod_ip() -> str:
- """Get Kubernetes Pod IP.
-
- Returns:
- str: The IP of the Pod.
- """
- return socket.gethostbyname(socket.gethostname())
-
-
-_DEBUG_SCRIPT = r"""#!/bin/bash
-# Install and configure SSH, VS Code, and the debug environment
-
-function download_code(){{
- wget https://go.microsoft.com/fwlink/?LinkID=760868 -O code.deb
-}}
-
-function setup_envs(){{
- grep "source /debug.envs" /root/.bashrc || echo "source /debug.envs" | tee -a /root/.bashrc
-}}
-function setup_ssh(){{
- apt install ssh -y
- cat /etc/ssh/sshd_config |
- grep -E '^PermitRootLogin yes$$' || (
- echo PermitRootLogin yes |
- tee -a /etc/ssh/sshd_config
- )
- service ssh stop
- sleep 3
- service ssh start
- usermod --password $(echo {} | openssl passwd -1 -stdin) root
-}}
-
-function setup_code(){{
- apt install libasound2 -y
- (dpkg -i code.deb || apt-get install -f -y || apt-get install -f -y) && echo Code installed successfully
- code --install-extension ms-python.python --user-data-dir /root
- mkdir -p /root/.vscode-server
- cp -R /root/.vscode/extensions /root/.vscode-server/extensions
-}}
-
-export DEBIAN_FRONTEND=noninteractive
-apt update && apt install wget -y
-download_code &
-setup_ssh &
-setup_envs
-wait
-setup_code &
-wait
-"""
-
-
-@dataclass
-class SubModule:
- """Represent RO Submodules."""
- sub_module_path: str
- container_path: str
-
-
-class HostPath:
- """Represents a hostpath."""
- def __init__(self, config: str, container_path: str, submodules: dict = None) -> None:
- mount_path_items = config.split("-")
- mount_path_items.reverse()
- self.mount_path = "/" + "/".join(mount_path_items)
- self.config = config
- self.sub_module_dict = {}
- if submodules:
- for submodule in submodules.keys():
- self.sub_module_dict[submodule] = SubModule(
- sub_module_path=self.mount_path + "/" + submodule + "/" + submodules[submodule].split("/")[-1],
- container_path=submodules[submodule],
- )
- else:
- self.container_path = container_path
- self.module_name = container_path.split("/")[-1]
-
-class DebugMode(Object):
- """Class to handle the debug-mode."""
-
- def __init__(
- self,
- charm: CharmBase,
- stored: StoredState,
- container: Container,
- hostpaths: List[HostPath] = [],
- vscode_workspace_path: str = "files/vscode-workspace.json",
- ) -> None:
- super().__init__(charm, "debug-mode")
-
- self.charm = charm
- self._stored = stored
- self.hostpaths = hostpaths
- self.vscode_workspace = Path(vscode_workspace_path).read_text()
- self.container = container
-
- self._stored.set_default(
- debug_mode_started=False,
- debug_mode_vscode_command=None,
- debug_mode_password=None,
- )
-
- self.framework.observe(self.charm.on.config_changed, self._on_config_changed)
- self.framework.observe(self.charm.on[container.name].pebble_ready, self._on_config_changed)
- self.framework.observe(self.charm.on.update_status, self._on_update_status)
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- if not self.charm.unit.is_leader():
- return
-
- debug_mode_enabled = self.charm.config.get("debug-mode", False)
- action = self.enable if debug_mode_enabled else self.disable
- action()
-
- def _on_update_status(self, _) -> None:
- """Handler for the update-status event."""
- if not self.charm.unit.is_leader() or not self.started:
- return
-
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- @property
- def started(self) -> bool:
- """Indicates whether the debug-mode has started or not."""
- return self._stored.debug_mode_started
-
- @property
- def command(self) -> str:
- """Command to launch vscode."""
- return self._stored.debug_mode_vscode_command
-
- @property
- def password(self) -> str:
- """SSH password."""
- return self._stored.debug_mode_password
-
- def enable(self, service_name: str = None) -> None:
- """Enable debug-mode.
-
- This function mounts hostpaths of the OSM modules (if set), and
- configures the container so it can be easily debugged. The setup
- includes the configuration of SSH, environment variables, and
- VSCode workspace and plugins.
-
- Args:
- service_name (str, optional): Pebble service name which has the desired environment
- variables. Mandatory if there is more than one Pebble service configured.
- """
- hostpaths_to_reconfigure = self._hostpaths_to_reconfigure()
- if self.started and not hostpaths_to_reconfigure:
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
- return
-
- logger.debug("enabling debug-mode")
-
- # Mount hostpaths if set.
-        # If hostpaths need to be configured, the statefulset will be patched and
-        # restarted, and for that reason we return immediately. After the restart,
-        # no hostpaths are left to reconfigure, so we continue and set up the debug-mode.
- if hostpaths_to_reconfigure:
- self.charm.unit.status = MaintenanceStatus("debug-mode: configuring hostpaths")
- self._configure_hostpaths(hostpaths_to_reconfigure)
- return
-
- self.charm.unit.status = MaintenanceStatus("debug-mode: starting")
- password = secrets.token_hex(8)
- self._setup_debug_mode(
- password,
- service_name,
- mounted_hostpaths=[hp for hp in self.hostpaths if self.charm.config.get(hp.config)],
- )
-
- self._stored.debug_mode_vscode_command = self._get_vscode_command(get_pod_ip())
- self._stored.debug_mode_password = password
- self._stored.debug_mode_started = True
- logger.info("debug-mode is ready")
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- def disable(self) -> None:
- """Disable debug-mode."""
- logger.debug("disabling debug-mode")
- current_status = self.charm.unit.status
- hostpaths_unmounted = self._unmount_hostpaths()
-
- if not self._stored.debug_mode_started:
- return
- self._stored.debug_mode_started = False
- self._stored.debug_mode_vscode_command = None
- self._stored.debug_mode_password = None
-
- if not hostpaths_unmounted:
- self.charm.unit.status = current_status
- self._restart()
-
- def _hostpaths_to_reconfigure(self) -> List[HostPath]:
- hostpaths_to_reconfigure: List[HostPath] = []
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
- volumes = statefulset.spec.template.spec.volumes
-
- for hostpath in self.hostpaths:
- hostpath_is_set = True if self.charm.config.get(hostpath.config) else False
- hostpath_already_configured = next(
- (True for volume in volumes if volume.name == hostpath.config), False
- )
- if hostpath_is_set != hostpath_already_configured:
- hostpaths_to_reconfigure.append(hostpath)
-
- return hostpaths_to_reconfigure
-
- def _setup_debug_mode(
- self,
- password: str,
- service_name: str = None,
- mounted_hostpaths: List[HostPath] = [],
- ) -> None:
- services = self.container.get_plan().services
- if not service_name and len(services) != 1:
- raise Exception("Cannot start debug-mode: please set the service_name")
-
- service = None
- if not service_name:
- service_name, service = services.popitem()
- if not service:
- service = services.get(service_name)
-
- logger.debug(f"getting environment variables from service {service_name}")
- environment = service.environment
- environment_file_content = "\n".join(
- [f'export {key}="{value}"' for key, value in environment.items()]
- )
- logger.debug(f"pushing environment file to {self.container.name} container")
- self.container.push("/debug.envs", environment_file_content)
-
- # Push VSCode workspace
- logger.debug(f"pushing vscode workspace to {self.container.name} container")
- self.container.push("/debug.code-workspace", self.vscode_workspace)
-
- # Execute debugging script
- logger.debug(f"pushing debug-mode setup script to {self.container.name} container")
- self.container.push("/debug.sh", _DEBUG_SCRIPT.format(password), permissions=0o777)
- logger.debug(f"executing debug-mode setup script in {self.container.name} container")
- self.container.exec(["/debug.sh"]).wait_output()
- logger.debug(f"stopping service {service_name} in {self.container.name} container")
- self.container.stop(service_name)
-
- # Add symlinks to mounted hostpaths
- for hostpath in mounted_hostpaths:
- logger.debug(f"adding symlink for {hostpath.config}")
- if len(hostpath.sub_module_dict) > 0:
- for sub_module in hostpath.sub_module_dict.keys():
-                    self.container.exec(
-                        ["rm", "-rf", hostpath.sub_module_dict[sub_module].container_path]
-                    ).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- hostpath.sub_module_dict[sub_module].sub_module_path,
- hostpath.sub_module_dict[sub_module].container_path,
- ]
- )
-
- else:
- self.container.exec(["rm", "-rf", hostpath.container_path]).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- f"{hostpath.mount_path}/{hostpath.module_name}",
- hostpath.container_path,
- ]
- )
-
- def _configure_hostpaths(self, hostpaths: List[HostPath]):
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in hostpaths:
- if self.charm.config.get(hostpath.config):
- self._add_hostpath_to_statefulset(hostpath, statefulset)
- else:
- self._delete_hostpath_from_statefulset(hostpath, statefulset)
-
- client.replace(statefulset)
-
- def _unmount_hostpaths(self) -> bool:
- client = Client()
- hostpath_unmounted = False
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in self.hostpaths:
- if self._delete_hostpath_from_statefulset(hostpath, statefulset):
- hostpath_unmounted = True
-
- if hostpath_unmounted:
- client.replace(statefulset)
-
- return hostpath_unmounted
-
- def _add_hostpath_to_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- # Add volume
- logger.debug(f"adding volume {hostpath.config} to {self.charm.app.name} statefulset")
- volume = Volume(
- hostpath.config,
- hostPath=HostPathVolumeSource(
- path=self.charm.config[hostpath.config],
- type="Directory",
- ),
- )
- statefulset.spec.template.spec.volumes.append(volume)
-
- # Add volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
-
- logger.debug(
- f"adding volumeMount {hostpath.config} to {self.container.name} container"
- )
- statefulset_container.volumeMounts.append(
- VolumeMount(mountPath=hostpath.mount_path, name=hostpath.config)
- )
-
- def _delete_hostpath_from_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- hostpath_unmounted = False
- for volume in statefulset.spec.template.spec.volumes:
-
- if hostpath.config != volume.name:
- continue
-
- # Remove volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
- for volume_mount in statefulset_container.volumeMounts:
- if volume_mount.name != hostpath.config:
- continue
-
- logger.debug(
- f"removing volumeMount {hostpath.config} from {self.container.name} container"
- )
- statefulset_container.volumeMounts.remove(volume_mount)
-
- # Remove volume
- logger.debug(
- f"removing volume {hostpath.config} from {self.charm.app.name} statefulset"
- )
- statefulset.spec.template.spec.volumes.remove(volume)
-
- hostpath_unmounted = True
- return hostpath_unmounted
-
- def _get_vscode_command(
- self,
- pod_ip: str,
- user: str = "root",
- workspace_path: str = "/debug.code-workspace",
- ) -> str:
- return f"code --remote ssh-remote+{user}@{pod_ip} {workspace_path}"
-
- def _restart(self):
- self.container.exec(["kill", "-HUP", "1"])
diff --git a/installers/charm/osm-mon/lib/charms/osm_vca_integrator/v0/vca.py b/installers/charm/osm-mon/lib/charms/osm_vca_integrator/v0/vca.py
deleted file mode 100644
index 21dac69c..00000000
--- a/installers/charm/osm-mon/lib/charms/osm_vca_integrator/v0/vca.py
+++ /dev/null
@@ -1,221 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""VCA Library.
-
-VCA stands for VNF Configuration and Abstraction, and is one of the core components
-of OSM. The Juju Controller is in charge of this role.
-
-This [library](https://juju.is/docs/sdk/libraries) implements both sides of the
-`vca` [interface](https://juju.is/docs/sdk/relations).
-
-The *provider* side of this interface is implemented by the
-[osm-vca-integrator Charmed Operator](https://charmhub.io/osm-vca-integrator).
-
-This library helps charms integrate with the vca-integrator charm, which provides
-the data that OSM components need in order to talk to the VCA.
-
-Any Charmed OSM component that needs to talk to the VCA should implement
-the *requirer* side of this interface.
-
-In a nutshell, using this library to implement a Charmed Operator *requiring* VCA data
-looks like this:
-
-```
-$ charmcraft fetch-lib charms.osm_vca_integrator.v0.vca
-```
-
-`metadata.yaml`:
-
-```
-requires:
- vca:
- interface: osm-vca
-```
-
-`src/charm.py`:
-
-```
-from charms.osm_vca_integrator.v0.vca import VcaData, VcaIntegratorEvents, VcaRequires
-from ops.charm import CharmBase
-
-
-class MyCharm(CharmBase):
-
- on = VcaIntegratorEvents()
-
- def __init__(self, *args):
- super().__init__(*args)
- self.vca = VcaRequires(self)
- self.framework.observe(
- self.on.vca_data_changed,
- self._on_vca_data_changed,
- )
-
- def _on_vca_data_changed(self, event):
- # Get Vca data
- data: VcaData = self.vca.data
- # data.endpoints => "localhost:17070"
-```
-
-You can file bugs
-[here](https://github.com/charmed-osm/osm-vca-integrator-operator/issues)!
-"""
-
-import json
-import logging
-from typing import Any, Dict, Optional
-
-from ops.charm import CharmBase, CharmEvents, RelationChangedEvent
-from ops.framework import EventBase, EventSource, Object
-
-from ops.model import Relation
-
-# The unique Charmhub library identifier, never change it
-LIBID = "746b36c382984e5c8660b78192d84ef9"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 3
-
-
-logger = logging.getLogger(__name__)
-
-
-class VcaDataChangedEvent(EventBase):
- """Event emitted whenever there is a change in the vca data."""
-
- def __init__(self, handle):
- super().__init__(handle)
-
-
-class VcaIntegratorEvents(CharmEvents):
- """VCA Integrator events.
-
-    This class defines the custom events that the VCA library can emit.
-
- Events:
-        vca_data_changed (VcaDataChangedEvent)
- """
-
- vca_data_changed = EventSource(VcaDataChangedEvent)
-
-
-RELATION_MANDATORY_KEYS = ("endpoints", "user", "secret", "public-key", "cacert", "model-configs")
-
-
-class VcaData:
- """Vca data class."""
-
- def __init__(self, data: Dict[str, Any]) -> None:
-        self.data: Dict[str, Any] = data
- self.endpoints: str = data["endpoints"]
- self.user: str = data["user"]
- self.secret: str = data["secret"]
- self.public_key: str = data["public-key"]
- self.cacert: str = data["cacert"]
- self.lxd_cloud: str = data.get("lxd-cloud")
- self.lxd_credentials: str = data.get("lxd-credentials")
- self.k8s_cloud: str = data.get("k8s-cloud")
- self.k8s_credentials: str = data.get("k8s-credentials")
- self.model_configs: Dict[str, Any] = data.get("model-configs", {})
-
-
-class VcaDataMissingError(Exception):
- """Data missing exception."""
-
-
-class VcaRequires(Object):
- """Requires part of the vca relation.
-
- Attributes:
- endpoint_name: Endpoint name of the charm for the vca relation.
- data: Vca data from the relation.
- """
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "vca") -> None:
- super().__init__(charm, endpoint_name)
- self._charm = charm
- self.endpoint_name = endpoint_name
- self.framework.observe(charm.on[endpoint_name].relation_changed, self._on_relation_changed)
-
- @property
- def data(self) -> Optional[VcaData]:
- """Vca data from the relation."""
- relation: Relation = self.model.get_relation(self.endpoint_name)
- if not relation or relation.app not in relation.data:
- logger.debug("no application data in the event")
- return
-
- relation_data: Dict = dict(relation.data[relation.app])
- relation_data["model-configs"] = json.loads(relation_data.get("model-configs", "{}"))
- try:
- self._validate_relation_data(relation_data)
- return VcaData(relation_data)
- except VcaDataMissingError as e:
- logger.warning(e)
-
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- if event.app not in event.relation.data:
- logger.debug("no application data in the event")
- return
-
- relation_data = event.relation.data[event.app]
- try:
- self._validate_relation_data(relation_data)
- self._charm.on.vca_data_changed.emit()
- except VcaDataMissingError as e:
- logger.warning(e)
-
- def _validate_relation_data(self, relation_data: Dict[str, str]) -> None:
- if not all(required_key in relation_data for required_key in RELATION_MANDATORY_KEYS):
- raise VcaDataMissingError("vca data not ready yet")
-
- clouds = ("lxd-cloud", "k8s-cloud")
- if not any(cloud in relation_data for cloud in clouds):
- raise VcaDataMissingError("no clouds defined yet")
-
-
-class VcaProvides(Object):
- """Provides part of the vca relation.
-
- Attributes:
- endpoint_name: Endpoint name of the charm for the vca relation.
- """
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "vca") -> None:
- super().__init__(charm, endpoint_name)
- self.endpoint_name = endpoint_name
-
- def update_vca_data(self, vca_data: VcaData) -> None:
- """Update vca data in relation.
-
- Args:
- vca_data: VcaData object.
- """
- relation: Relation
- for relation in self.model.relations[self.endpoint_name]:
- if not relation or self.model.app not in relation.data:
- logger.debug("relation app data not ready yet")
- for key, value in vca_data.data.items():
- if key == "model-configs":
- value = json.dumps(value)
- relation.data[self.model.app][key] = value
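
The docstring above only demonstrates the *requirer* side. A hedged sketch of the *provider* side (the role played by osm-vca-integrator) using `VcaProvides` and `VcaData`, with placeholder values and an illustrative charm name, might look like:

```python
from charms.osm_vca_integrator.v0.vca import VcaData, VcaProvides
from ops.charm import CharmBase
from ops.main import main


class ExampleIntegratorCharm(CharmBase):
    """Illustrative provider; assumes `provides: vca: interface: osm-vca` in metadata.yaml."""

    def __init__(self, *args):
        super().__init__(*args)
        self.vca_provides = VcaProvides(self)
        self.framework.observe(self.on.config_changed, self._on_config_changed)

    def _on_config_changed(self, _) -> None:
        # Placeholder values only; a real integrator derives these from the
        # Juju controller it is configured against.
        data = {
            "endpoints": "10.0.0.1:17070",
            "user": "admin",
            "secret": "s3cr3t",
            "public-key": "ssh-rsa AAAA...",
            "cacert": "-----BEGIN CERTIFICATE-----...",
            "model-configs": {},
            "k8s-cloud": "microk8s",
        }
        # update_vca_data writes each key into the app databag of every `vca`
        # relation (the library JSON-encodes model-configs).
        self.vca_provides.update_vca_data(VcaData(data))


if __name__ == "__main__":
    main(ExampleIntegratorCharm)
```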
diff --git a/installers/charm/osm-mon/metadata.yaml b/installers/charm/osm-mon/metadata.yaml
deleted file mode 100644
index 5bd12360..00000000
--- a/installers/charm/osm-mon/metadata.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Overview on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-name: osm-mon
-
-# The following metadata are human-readable and will be published prominently on Charmhub.
-
-display-name: OSM MON
-
-summary: OSM Monitoring Service (MON)
-
-description: |
- A Kubernetes operator that deploys the Monitoring Service of OSM.
-
-  MON collects metrics from the infrastructure and network services managed by
-  OSM, evaluates alarm thresholds on them, and exposes the results through
-  Prometheus and Grafana.
-
- This charm doesn't make sense on its own.
- See more:
- - https://charmhub.io/osm
-
-containers:
- mon:
- resource: mon-image
-
-# This file populates the Resources tab on Charmhub.
-
-resources:
- mon-image:
- type: oci-image
- description: OCI image for mon
- upstream-source: opensourcemano/mon
-
-requires:
- kafka:
- interface: kafka
- limit: 1
- mongodb:
- interface: mongodb_client
- limit: 1
- keystone:
- interface: keystone
- limit: 1
- prometheus:
- interface: prometheus
- limit: 1
- vca:
- interface: osm-vca
diff --git a/installers/charm/osm-mon/pyproject.toml b/installers/charm/osm-mon/pyproject.toml
deleted file mode 100644
index 16cf0f4b..00000000
--- a/installers/charm/osm-mon/pyproject.toml
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-# Testing tools configuration
-[tool.coverage.run]
-branch = true
-
-[tool.coverage.report]
-show_missing = true
-
-[tool.pytest.ini_options]
-minversion = "6.0"
-log_cli_level = "INFO"
-
-# Formatting tools configuration
-[tool.black]
-line-length = 99
-target-version = ["py38"]
-
-[tool.isort]
-profile = "black"
-
-# Linting tools configuration
-[tool.flake8]
-max-line-length = 99
-max-doc-length = 99
-max-complexity = 10
-exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
-select = ["E", "W", "F", "C", "N", "R", "D", "H"]
-# Ignore W503, E501 because using black creates errors with this
-# Ignore D107 Missing docstring in __init__
-ignore = ["W503", "E501", "D107"]
-# D100, D101, D102, D103: Ignore missing docstrings in tests
-per-file-ignores = ["tests/*:D100,D101,D102,D103,D104"]
-docstring-convention = "google"
diff --git a/installers/charm/osm-mon/requirements.txt b/installers/charm/osm-mon/requirements.txt
deleted file mode 100644
index 398d4ad3..00000000
--- a/installers/charm/osm-mon/requirements.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-ops < 2.2
-lightkube
-lightkube-models
-# git+https://github.com/charmed-osm/config-validator/
diff --git a/installers/charm/osm-mon/src/charm.py b/installers/charm/osm-mon/src/charm.py
deleted file mode 100755
index 12c5dcda..00000000
--- a/installers/charm/osm-mon/src/charm.py
+++ /dev/null
@@ -1,300 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# Learn more at: https://juju.is/docs/sdk
-
-"""OSM MON charm.
-
-See more: https://charmhub.io/osm
-"""
-
-import logging
-from typing import Any, Dict
-
-from charms.data_platform_libs.v0.data_interfaces import DatabaseRequires
-from charms.kafka_k8s.v0.kafka import KafkaRequires, _KafkaAvailableEvent
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from charms.osm_libs.v0.utils import (
- CharmError,
- DebugMode,
- HostPath,
- check_container_ready,
- check_service_active,
-)
-from charms.osm_vca_integrator.v0.vca import VcaDataChangedEvent, VcaRequires
-from lightkube.models.core_v1 import ServicePort
-from ops.charm import ActionEvent, CharmBase, CharmEvents
-from ops.framework import EventSource, StoredState
-from ops.main import main
-from ops.model import ActiveStatus, Container
-
-from legacy_interfaces import KeystoneClient, PrometheusClient
-
-HOSTPATHS = [
- HostPath(
- config="mon-hostpath",
- container_path="/usr/lib/python3/dist-packages/osm_mon",
- ),
- HostPath(
- config="common-hostpath",
- container_path="/usr/lib/python3/dist-packages/osm_common",
- ),
- HostPath(
- config="n2vc-hostpath",
- container_path="/usr/lib/python3/dist-packages/n2vc",
- ),
-]
-SERVICE_PORT = 8000
-
-logger = logging.getLogger(__name__)
-
-
-class MonEvents(CharmEvents):
- """MON events."""
-
- vca_data_changed = EventSource(VcaDataChangedEvent)
- kafka_available = EventSource(_KafkaAvailableEvent)
-
-
-class OsmMonCharm(CharmBase):
- """OSM MON Kubernetes sidecar charm."""
-
- on = MonEvents()
- _stored = StoredState()
- container_name = "mon"
- service_name = "mon"
-
- def __init__(self, *args):
- super().__init__(*args)
- self.kafka = KafkaRequires(self)
- self.mongodb_client = DatabaseRequires(self, "mongodb", database_name="osm")
- self.prometheus_client = PrometheusClient(self, "prometheus")
- self.keystone_client = KeystoneClient(self, "keystone")
- self.vca = VcaRequires(self)
- self._observe_charm_events()
- self.container: Container = self.unit.get_container(self.container_name)
- self.debug_mode = DebugMode(self, self._stored, self.container, HOSTPATHS)
- self._patch_k8s_service()
-
- @property
- def external_hostname(self) -> str:
- """External hostname property.
-
- Returns:
- str: the external hostname from config.
- If not set, return the ClusterIP service name.
- """
- return self.config.get("external-hostname") or self.app.name
-
- # ---------------------------------------------------------------------------
- # Handlers for Charm Events
- # ---------------------------------------------------------------------------
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- try:
- self._validate_config()
- self._check_relations()
- # Check if the container is ready.
- # Eventually it will become ready after the first pebble-ready event.
- check_container_ready(self.container)
- if not self.debug_mode.started:
- self._configure_service(self.container)
- # Update charm status
- self._on_update_status()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _on_update_status(self, _=None) -> None:
- """Handler for the update-status event."""
- try:
- self._validate_config()
- self._check_relations()
- check_container_ready(self.container)
- if self.debug_mode.started:
- return
- check_service_active(self.container, self.service_name)
- self.unit.status = ActiveStatus()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _on_required_relation_broken(self, _) -> None:
- """Handler for the kafka-broken event."""
- try:
- check_container_ready(self.container)
- check_service_active(self.container, self.service_name)
- self.container.stop(self.container_name)
- except CharmError:
- pass
- self._on_update_status()
-
- def _on_get_debug_mode_information_action(self, event: ActionEvent) -> None:
- """Handler for the get-debug-mode-information action event."""
- if not self.debug_mode.started:
- event.fail("debug-mode has not started. Hint: juju config mon debug-mode=true")
- return
-
- debug_info = {
- "command": self.debug_mode.command,
- "password": self.debug_mode.password,
- }
- event.set_results(debug_info)
-
- # ---------------------------------------------------------------------------
-    # Validation, configuration and helpers
- # ---------------------------------------------------------------------------
-
- def _observe_charm_events(self) -> None:
- event_handler_mapping = {
- # Core lifecycle events
- self.on.mon_pebble_ready: self._on_config_changed,
- self.on.config_changed: self._on_config_changed,
- self.on.update_status: self._on_update_status,
- # Relation events
- self.on.vca_data_changed: self._on_config_changed,
- self.on.kafka_available: self._on_config_changed,
- self.on["kafka"].relation_broken: self._on_required_relation_broken,
- self.mongodb_client.on.database_created: self._on_config_changed,
- self.on["mongodb"].relation_broken: self._on_required_relation_broken,
- # Action events
- self.on.get_debug_mode_information_action: self._on_get_debug_mode_information_action,
- }
- for relation in [self.on[rel_name] for rel_name in ["prometheus", "keystone"]]:
- event_handler_mapping[relation.relation_changed] = self._on_config_changed
- event_handler_mapping[relation.relation_broken] = self._on_required_relation_broken
-
- for event, handler in event_handler_mapping.items():
- self.framework.observe(event, handler)
-
- def _is_database_available(self) -> bool:
- try:
- return self.mongodb_client.is_resource_created()
- except KeyError:
- return False
-
- def _validate_config(self) -> None:
- """Validate charm configuration.
-
- Raises:
- CharmError: if charm configuration is invalid.
- """
- logger.debug("validating charm config")
-
- def _check_relations(self) -> None:
- """Validate charm relations.
-
- Raises:
-            CharmError: if any mandatory relation is missing.
- """
- logger.debug("check for missing relations")
- missing_relations = []
-
- if not self.kafka.host or not self.kafka.port:
- missing_relations.append("kafka")
- if not self._is_database_available():
- missing_relations.append("mongodb")
- if self.prometheus_client.is_missing_data_in_app():
- missing_relations.append("prometheus")
- if self.keystone_client.is_missing_data_in_app():
- missing_relations.append("keystone")
-
- if missing_relations:
- relations_str = ", ".join(missing_relations)
- one_relation_missing = len(missing_relations) == 1
- error_msg = f'need {relations_str} relation{"" if one_relation_missing else "s"}'
- logger.warning(error_msg)
- raise CharmError(error_msg)
-
- def _configure_service(self, container: Container) -> None:
- """Add Pebble layer with the mon service."""
- logger.debug(f"configuring {self.app.name} service")
- container.add_layer("mon", self._get_layer(), combine=True)
- container.replan()
-
- def _get_layer(self) -> Dict[str, Any]:
- """Get layer for Pebble."""
- environment = {
- # General configuration
- "OSMMON_GLOBAL_LOGLEVEL": self.config["log-level"],
- "OSMMON_OPENSTACK_DEFAULT_GRANULARITY": self.config["openstack-default-granularity"],
- "OSMMON_GLOBAL_REQUEST_TIMEOUT": self.config["global-request-timeout"],
- "OSMMON_COLLECTOR_INTERVAL": self.config["collector-interval"],
- "OSMMON_EVALUATOR_INTERVAL": self.config["evaluator-interval"],
- "OSMMON_COLLECTOR_VM_INFRA_METRICS": self.config["vm-infra-metrics"],
- # Kafka configuration
- "OSMMON_MESSAGE_DRIVER": "kafka",
- "OSMMON_MESSAGE_HOST": self.kafka.host,
- "OSMMON_MESSAGE_PORT": self.kafka.port,
- # Database configuration
- "OSMMON_DATABASE_DRIVER": "mongo",
- "OSMMON_DATABASE_URI": self._get_mongodb_uri(),
- "OSMMON_DATABASE_COMMONKEY": self.config["database-commonkey"],
- # Prometheus/grafana configuration
- "OSMMON_PROMETHEUS_URL": f"http://{self.prometheus_client.hostname}:{self.prometheus_client.port}",
- "OSMMON_PROMETHEUS_USER": self.prometheus_client.user,
- "OSMMON_PROMETHEUS_PASSWORD": self.prometheus_client.password,
- "OSMMON_GRAFANA_URL": self.config["grafana-url"],
- "OSMMON_GRAFANA_USER": self.config["grafana-user"],
- "OSMMON_GRAFANA_PASSWORD": self.config["grafana-password"],
- "OSMMON_KEYSTONE_ENABLED": self.config["keystone-enabled"],
- "OSMMON_KEYSTONE_URL": self.keystone_client.host,
- "OSMMON_KEYSTONE_DOMAIN_NAME": self.keystone_client.user_domain_name,
- "OSMMON_KEYSTONE_SERVICE_PROJECT": self.keystone_client.service,
- "OSMMON_KEYSTONE_SERVICE_USER": self.keystone_client.username,
- "OSMMON_KEYSTONE_SERVICE_PASSWORD": self.keystone_client.password,
- "OSMMON_KEYSTONE_SERVICE_PROJECT_DOMAIN_NAME": self.keystone_client.project_domain_name,
- }
- logger.info(f"{environment}")
- if self.vca.data:
- environment["OSMMON_VCA_HOST"] = self.vca.data.endpoints
- environment["OSMMON_VCA_SECRET"] = self.vca.data.secret
- environment["OSMMON_VCA_USER"] = self.vca.data.user
- environment["OSMMON_VCA_CACERT"] = self.vca.data.cacert
- return {
- "summary": "mon layer",
- "description": "pebble config layer for mon",
- "services": {
- self.service_name: {
- "override": "replace",
- "summary": "mon service",
- "command": "/bin/bash -c 'cd /app/osm_mon/ && /bin/bash start.sh'",
- "startup": "enabled",
- "user": "appuser",
- "group": "appuser",
- "working-dir": "/app/osm_mon", # This parameter has no effect in Juju 2.9.x
- "environment": environment,
- }
- },
- }
-
- def _get_mongodb_uri(self):
- return list(self.mongodb_client.fetch_relation_data().values())[0]["uris"]
-
- def _patch_k8s_service(self) -> None:
- port = ServicePort(SERVICE_PORT, name=f"{self.app.name}")
- self.service_patcher = KubernetesServicePatch(self, [port])
-
-
-if __name__ == "__main__": # pragma: no cover
- main(OsmMonCharm)
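
Note that `_get_mongodb_uri` above assumes `DatabaseRequires.fetch_relation_data()` returns a dict keyed by relation id whose values carry a `uris` field, which is also the shape the unit tests further down populate. A minimal illustration of that assumption, with placeholder values only:

```python
# Placeholder data mimicking DatabaseRequires.fetch_relation_data() output;
# the charm simply takes the "uris" field of the first (and only) relation.
relation_data = {
    0: {"uris": "mongodb://mongodb-k8s:27017/osm", "username": "user", "password": "pass"}
}
mongodb_uri = list(relation_data.values())[0]["uris"]
print(mongodb_uri)  # -> mongodb://mongodb-k8s:27017/osm
```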
diff --git a/installers/charm/osm-mon/src/legacy_interfaces.py b/installers/charm/osm-mon/src/legacy_interfaces.py
deleted file mode 100644
index 5deb3f5f..00000000
--- a/installers/charm/osm-mon/src/legacy_interfaces.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# flake8: noqa
-
-import ops
-
-
-class BaseRelationClient(ops.framework.Object):
- """Requires side of a Kafka Endpoint"""
-
- def __init__(
- self,
- charm: ops.charm.CharmBase,
- relation_name: str,
- mandatory_fields: list = [],
- ):
- super().__init__(charm, relation_name)
- self.relation_name = relation_name
- self.mandatory_fields = mandatory_fields
- self._update_relation()
-
- def get_data_from_unit(self, key: str):
- if not self.relation:
-            # Refresh the cached relation. The constructor runs on every hook in
-            # production, but in unit tests update_relation_data() does not re-run
-            # it, so the cached relation can be missing or stale.
- self._update_relation()
- if self.relation:
- for unit in self.relation.units:
- data = self.relation.data[unit].get(key)
- if data:
- return data
-
- def get_data_from_app(self, key: str):
- if not self.relation or self.relation.app not in self.relation.data:
-            # Refresh the cached relation. The constructor runs on every hook in
-            # production, but in unit tests update_relation_data() does not re-run
-            # it, so the cached relation can be missing or stale.
- self._update_relation()
- if self.relation and self.relation.app in self.relation.data:
- data = self.relation.data[self.relation.app].get(key)
- if data:
- return data
-
- def is_missing_data_in_unit(self):
- return not all([self.get_data_from_unit(field) for field in self.mandatory_fields])
-
- def is_missing_data_in_app(self):
- return not all([self.get_data_from_app(field) for field in self.mandatory_fields])
-
- def _update_relation(self):
- self.relation = self.framework.model.get_relation(self.relation_name)
-
-
-class KeystoneClient(BaseRelationClient):
- """Requires side of a Keystone Endpoint"""
-
- mandatory_fields = [
- "host",
- "port",
- "user_domain_name",
- "project_domain_name",
- "username",
- "password",
- "service",
- "keystone_db_password",
- "region_id",
- "admin_username",
- "admin_password",
- "admin_project_name",
- ]
-
- def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
- super().__init__(charm, relation_name, self.mandatory_fields)
-
- @property
- def host(self):
- return self.get_data_from_app("host")
-
- @property
- def port(self):
- return self.get_data_from_app("port")
-
- @property
- def user_domain_name(self):
- return self.get_data_from_app("user_domain_name")
-
- @property
- def project_domain_name(self):
- return self.get_data_from_app("project_domain_name")
-
- @property
- def username(self):
- return self.get_data_from_app("username")
-
- @property
- def password(self):
- return self.get_data_from_app("password")
-
- @property
- def service(self):
- return self.get_data_from_app("service")
-
- @property
- def keystone_db_password(self):
- return self.get_data_from_app("keystone_db_password")
-
- @property
- def region_id(self):
- return self.get_data_from_app("region_id")
-
- @property
- def admin_username(self):
- return self.get_data_from_app("admin_username")
-
- @property
- def admin_password(self):
- return self.get_data_from_app("admin_password")
-
- @property
- def admin_project_name(self):
- return self.get_data_from_app("admin_project_name")
-
-
-class MongoClient(BaseRelationClient):
- """Requires side of a Mongo Endpoint"""
-
- mandatory_fields_mapping = {
- "reactive": ["connection_string"],
- "ops": ["replica_set_uri", "replica_set_name"],
- }
-
- def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
- super().__init__(charm, relation_name, mandatory_fields=[])
-
- @property
- def connection_string(self):
- if self.is_opts():
- replica_set_uri = self.get_data_from_unit("replica_set_uri")
- replica_set_name = self.get_data_from_unit("replica_set_name")
- return f"{replica_set_uri}?replicaSet={replica_set_name}"
- else:
- return self.get_data_from_unit("connection_string")
-
- def is_opts(self):
- return not self.is_missing_data_in_unit_ops()
-
- def is_missing_data_in_unit(self):
- return self.is_missing_data_in_unit_ops() and self.is_missing_data_in_unit_reactive()
-
- def is_missing_data_in_unit_ops(self):
- return not all(
- [self.get_data_from_unit(field) for field in self.mandatory_fields_mapping["ops"]]
- )
-
- def is_missing_data_in_unit_reactive(self):
- return not all(
- [self.get_data_from_unit(field) for field in self.mandatory_fields_mapping["reactive"]]
- )
-
-
-class PrometheusClient(BaseRelationClient):
- """Requires side of a Prometheus Endpoint"""
-
- mandatory_fields = ["hostname", "port"]
-
- def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
- super().__init__(charm, relation_name, self.mandatory_fields)
-
- @property
- def hostname(self):
- return self.get_data_from_app("hostname")
-
- @property
- def port(self):
- return self.get_data_from_app("port")
-
- @property
- def user(self):
- return self.get_data_from_app("user")
-
- @property
- def password(self):
- return self.get_data_from_app("password")
diff --git a/installers/charm/osm-mon/tests/integration/test_charm.py b/installers/charm/osm-mon/tests/integration/test_charm.py
deleted file mode 100644
index caf8deda..00000000
--- a/installers/charm/osm-mon/tests/integration/test_charm.py
+++ /dev/null
@@ -1,214 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# Learn more about testing at: https://juju.is/docs/sdk/testing
-
-import asyncio
-import logging
-import shlex
-from pathlib import Path
-
-import pytest
-import yaml
-from pytest_operator.plugin import OpsTest
-
-logger = logging.getLogger(__name__)
-
-METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
-MON_APP = METADATA["name"]
-KAFKA_CHARM = "kafka-k8s"
-KAFKA_APP = "kafka"
-KEYSTONE_CHARM = "osm-keystone"
-KEYSTONE_APP = "keystone"
-MARIADB_CHARM = "charmed-osm-mariadb-k8s"
-MARIADB_APP = "mariadb"
-MONGO_DB_CHARM = "mongodb-k8s"
-MONGO_DB_APP = "mongodb"
-PROMETHEUS_CHARM = "osm-prometheus"
-PROMETHEUS_APP = "prometheus"
-ZOOKEEPER_CHARM = "zookeeper-k8s"
-ZOOKEEPER_APP = "zookeeper"
-VCA_CHARM = "osm-vca-integrator"
-VCA_APP = "vca"
-APPS = [KAFKA_APP, ZOOKEEPER_APP, KEYSTONE_APP, MONGO_DB_APP, MARIADB_APP, PROMETHEUS_APP, MON_APP]
-
-
-@pytest.mark.abort_on_fail
-async def test_mon_is_deployed(ops_test: OpsTest):
- charm = await ops_test.build_charm(".")
- resources = {"mon-image": METADATA["resources"]["mon-image"]["upstream-source"]}
-
- await asyncio.gather(
- ops_test.model.deploy(
- charm, resources=resources, application_name=MON_APP, series="jammy"
- ),
- ops_test.model.deploy(KAFKA_CHARM, application_name=KAFKA_APP, channel="stable"),
- ops_test.model.deploy(MONGO_DB_CHARM, application_name=MONGO_DB_APP, channel="5/edge"),
- ops_test.model.deploy(MARIADB_CHARM, application_name=MARIADB_APP, channel="stable"),
- ops_test.model.deploy(PROMETHEUS_CHARM, application_name=PROMETHEUS_APP, channel="stable"),
- ops_test.model.deploy(ZOOKEEPER_CHARM, application_name=ZOOKEEPER_APP, channel="stable"),
- )
- keystone_image = "opensourcemano/keystone:testing-daily"
- cmd = f"juju deploy {KEYSTONE_CHARM} {KEYSTONE_APP} --resource keystone-image={keystone_image} --channel=latest/beta --series jammy"
- await ops_test.run(*shlex.split(cmd), check=True)
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- )
- assert ops_test.model.applications[MON_APP].status == "blocked"
- unit = ops_test.model.applications[MON_APP].units[0]
- assert unit.workload_status_message == "need kafka, mongodb, prometheus, keystone relations"
-
- logger.info("Adding relations for other components")
- await ops_test.model.add_relation(KAFKA_APP, ZOOKEEPER_APP)
- await ops_test.model.add_relation(MARIADB_APP, KEYSTONE_APP)
-
- logger.info("Adding relations for MON")
- await ops_test.model.add_relation(
- "{}:mongodb".format(MON_APP), "{}:database".format(MONGO_DB_APP)
- )
- await ops_test.model.add_relation(MON_APP, KAFKA_APP)
- await ops_test.model.add_relation(MON_APP, KEYSTONE_APP)
- await ops_test.model.add_relation(MON_APP, PROMETHEUS_APP)
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- )
-
-
-@pytest.mark.abort_on_fail
-async def test_mon_scales_up(ops_test: OpsTest):
- logger.info("Scaling up osm-mon")
- expected_units = 3
- assert len(ops_test.model.applications[MON_APP].units) == 1
- await ops_test.model.applications[MON_APP].scale(expected_units)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=[MON_APP], status="active", wait_for_exact_units=expected_units
- )
-
-
-@pytest.mark.abort_on_fail
-@pytest.mark.parametrize(
- "relation_to_remove", [KAFKA_APP, MONGO_DB_APP, PROMETHEUS_APP, KEYSTONE_APP]
-)
-async def test_mon_blocks_without_relation(ops_test: OpsTest, relation_to_remove):
- logger.info("Removing relation: %s", relation_to_remove)
- # mongoDB relation is named "database"
- local_relation = relation_to_remove
- if relation_to_remove == MONGO_DB_APP:
- local_relation = "database"
- await asyncio.gather(
- ops_test.model.applications[relation_to_remove].remove_relation(local_relation, MON_APP)
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[MON_APP])
- assert ops_test.model.applications[MON_APP].status == "blocked"
- for unit in ops_test.model.applications[MON_APP].units:
- assert unit.workload_status_message == f"need {relation_to_remove} relation"
- await ops_test.model.add_relation(MON_APP, relation_to_remove)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- )
-
-
-@pytest.mark.abort_on_fail
-async def test_mon_action_debug_mode_disabled(ops_test: OpsTest):
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- )
- logger.info("Running action 'get-debug-mode-information'")
- action = (
- await ops_test.model.applications[MON_APP]
- .units[0]
- .run_action("get-debug-mode-information")
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[MON_APP])
- status = await ops_test.model.get_action_status(uuid_or_prefix=action.entity_id)
- assert status[action.entity_id] == "failed"
-
-
-@pytest.mark.abort_on_fail
-async def test_mon_action_debug_mode_enabled(ops_test: OpsTest):
- await ops_test.model.applications[MON_APP].set_config({"debug-mode": "true"})
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- )
- logger.info("Running action 'get-debug-mode-information'")
- # list of units is not ordered
- unit_id = list(
- filter(
- lambda x: (x.entity_id == f"{MON_APP}/0"), ops_test.model.applications[MON_APP].units
- )
- )[0]
- action = await unit_id.run_action("get-debug-mode-information")
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[MON_APP])
- status = await ops_test.model.get_action_status(uuid_or_prefix=action.entity_id)
- message = await ops_test.model.get_action_output(action_uuid=action.entity_id)
- assert status[action.entity_id] == "completed"
- assert "command" in message
- assert "password" in message
-
-
-@pytest.mark.abort_on_fail
-async def test_mon_integration_vca(ops_test: OpsTest):
- await asyncio.gather(
- ops_test.model.deploy(
- VCA_CHARM, application_name=VCA_APP, channel="latest/beta", series="jammy"
- ),
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=[VCA_APP],
- )
- controllers = (Path.home() / ".local/share/juju/controllers.yaml").read_text()
- accounts = (Path.home() / ".local/share/juju/accounts.yaml").read_text()
- public_key = (Path.home() / ".local/share/juju/ssh/juju_id_rsa.pub").read_text()
- await ops_test.model.applications[VCA_APP].set_config(
- {
- "controllers": controllers,
- "accounts": accounts,
- "public-key": public_key,
- "k8s-cloud": "microk8s",
- }
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS + [VCA_APP],
- status="active",
- )
- await ops_test.model.add_relation(MON_APP, VCA_APP)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS + [VCA_APP],
- status="active",
- )
diff --git a/installers/charm/osm-mon/tests/unit/test_charm.py b/installers/charm/osm-mon/tests/unit/test_charm.py
deleted file mode 100644
index 33598fe6..00000000
--- a/installers/charm/osm-mon/tests/unit/test_charm.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# Learn more about testing at: https://juju.is/docs/sdk/testing
-
-import pytest
-from ops.model import ActiveStatus, BlockedStatus
-from ops.testing import Harness
-from pytest_mock import MockerFixture
-
-from charm import CharmError, OsmMonCharm, check_service_active
-
-container_name = "mon"
-service_name = "mon"
-
-
-@pytest.fixture
-def harness(mocker: MockerFixture):
- mocker.patch("charm.KubernetesServicePatch", lambda x, y: None)
- harness = Harness(OsmMonCharm)
- harness.begin()
- harness.container_pebble_ready(container_name)
- yield harness
- harness.cleanup()
-
-
-def test_missing_relations(harness: Harness):
- harness.charm.on.config_changed.emit()
- assert type(harness.charm.unit.status) == BlockedStatus
- assert all(
- relation in harness.charm.unit.status.message
- for relation in ["mongodb", "kafka", "prometheus", "keystone"]
- )
-
-
-def test_ready(harness: Harness):
- _add_relations(harness)
- assert harness.charm.unit.status == ActiveStatus()
-
-
-def test_container_stops_after_relation_broken(harness: Harness):
- harness.charm.on[container_name].pebble_ready.emit(container_name)
- container = harness.charm.unit.get_container(container_name)
- relation_ids = _add_relations(harness)
- check_service_active(container, service_name)
- harness.remove_relation(relation_ids[0])
- with pytest.raises(CharmError):
- check_service_active(container, service_name)
-
-
-def _add_relations(harness: Harness):
- relation_ids = []
- # Add mongo relation
- relation_id = harness.add_relation("mongodb", "mongodb")
- harness.add_relation_unit(relation_id, "mongodb/0")
- harness.update_relation_data(
- relation_id,
- "mongodb",
- {"uris": "mongodb://:1234", "username": "user", "password": "password"},
- )
- relation_ids.append(relation_id)
- # Add kafka relation
- relation_id = harness.add_relation("kafka", "kafka")
- harness.add_relation_unit(relation_id, "kafka/0")
- harness.update_relation_data(relation_id, "kafka", {"host": "kafka", "port": "9092"})
- relation_ids.append(relation_id)
- # Add prometheus relation
- relation_id = harness.add_relation("prometheus", "prometheus")
- harness.add_relation_unit(relation_id, "prometheus/0")
- harness.update_relation_data(
- relation_id, "prometheus", {"hostname": "prometheus", "port": "9090"}
- )
- relation_ids.append(relation_id)
- # Add keystone relation
- relation_id = harness.add_relation("keystone", "keystone")
- harness.add_relation_unit(relation_id, "keystone/0")
- harness.update_relation_data(
- relation_id,
- "keystone",
- {
- "host": "host",
- "port": "port",
- "user_domain_name": "user_domain_name",
- "project_domain_name": "project_domain_name",
- "username": "username",
- "password": "password",
- "service": "service",
- "keystone_db_password": "keystone_db_password",
- "region_id": "region_id",
- "admin_username": "admin_username",
- "admin_password": "admin_password",
- "admin_project_name": "admin_project_name",
- },
- )
- relation_ids.append(relation_id)
- return relation_ids
diff --git a/installers/charm/osm-mon/tox.ini b/installers/charm/osm-mon/tox.ini
deleted file mode 100644
index 64bab107..00000000
--- a/installers/charm/osm-mon/tox.ini
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-[tox]
-skipsdist=True
-skip_missing_interpreters = True
-envlist = lint, unit, integration
-
-[vars]
-src_path = {toxinidir}/src/
-tst_path = {toxinidir}/tests/
-all_path = {[vars]src_path} {[vars]tst_path}
-
-[testenv]
-basepython = python3.8
-setenv =
- PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path}
- PYTHONBREAKPOINT=ipdb.set_trace
- PY_COLORS=1
-passenv =
- PYTHONPATH
- CHARM_BUILD_DIR
- MODEL_SETTINGS
-
-[testenv:fmt]
-description = Apply coding style standards to code
-deps =
- black
- isort
-commands =
- isort {[vars]all_path}
- black {[vars]all_path}
-
-[testenv:lint]
-description = Check code against coding style standards
-deps =
- black
- flake8
- flake8-docstrings
- flake8-builtins
- pyproject-flake8
- pep8-naming
- isort
- codespell
-commands =
- codespell {toxinidir} --skip {toxinidir}/.git --skip {toxinidir}/.tox \
- --skip {toxinidir}/build --skip {toxinidir}/lib --skip {toxinidir}/venv \
- --skip {toxinidir}/.mypy_cache --skip {toxinidir}/icon.svg
- # pflake8 wrapper supports config from pyproject.toml
- pflake8 {[vars]all_path}
- isort --check-only --diff {[vars]all_path}
- black --check --diff {[vars]all_path}
-
-[testenv:unit]
-description = Run unit tests
-deps =
- pytest
- pytest-mock
- coverage[toml]
- -r{toxinidir}/requirements.txt
-commands =
- coverage run --source={[vars]src_path} \
- -m pytest --ignore={[vars]tst_path}integration -v --tb native -s {posargs}
- coverage report
- coverage xml
-
-[testenv:integration]
-description = Run integration tests
-deps =
- pytest
- juju<3
- pytest-operator
- -r{toxinidir}/requirements.txt
-commands =
- pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} --cloud microk8s
diff --git a/installers/charm/osm-nbi/.gitignore b/installers/charm/osm-nbi/.gitignore
deleted file mode 100644
index 87d0a587..00000000
--- a/installers/charm/osm-nbi/.gitignore
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-venv/
-build/
-*.charm
-.tox/
-.coverage
-coverage.xml
-__pycache__/
-*.py[cod]
-.vscode
\ No newline at end of file
diff --git a/installers/charm/osm-nbi/.jujuignore b/installers/charm/osm-nbi/.jujuignore
deleted file mode 100644
index 17c7a8bb..00000000
--- a/installers/charm/osm-nbi/.jujuignore
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-/venv
-*.py[cod]
-*.charm
diff --git a/installers/charm/osm-nbi/CONTRIBUTING.md b/installers/charm/osm-nbi/CONTRIBUTING.md
deleted file mode 100644
index c59b9703..00000000
--- a/installers/charm/osm-nbi/CONTRIBUTING.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-# Contributing
-
-## Overview
-
-This document explains the processes and practices recommended for contributing enhancements to
-this operator.
-
-- Generally, before developing enhancements to this charm, you should consider [opening an issue
- ](https://osm.etsi.org/bugzilla/enter_bug.cgi?product=OSM) explaining your use case. (Component=devops, version=master)
-- If you would like to chat with us about your use-cases or proposed implementation, you can reach
- us at [OSM Juju public channel](https://opensourcemano.slack.com/archives/C027KJGPECA).
-- Familiarising yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) library
- will help you a lot when working on new features or bug fixes.
-- All enhancements require review before being merged. Code review typically examines
- - code quality
- - test coverage
-  - user experience for Juju administrators of this charm.
-- Please help us out in ensuring easy-to-review changes by rebasing your Gerrit patch onto
- the `master` branch.
-
-## Developing
-
-You can use the environments created by `tox` for development:
-
-```shell
-tox --notest -e unit
-source .tox/unit/bin/activate
-```
-
-### Testing
-
-```shell
-tox -e fmt # update your code according to linting rules
-tox -e lint # code style
-tox -e unit # unit tests
-tox -e integration # integration tests
-tox # runs 'lint' and 'unit' environments
-```
-
-## Build charm
-
-Build the charm in this git repository using:
-
-```shell
-charmcraft pack
-```
-
-### Deploy
-
-```bash
-# Create a model
-juju add-model dev
-# Enable DEBUG logging
-juju model-config logging-config="=INFO;unit=DEBUG"
-# Deploy the charm
-juju deploy ./osm-nbi_ubuntu-22.04-amd64.charm \
- --resource nbi-image=opensourcemano/nbi:testing-daily --series jammy
-```
diff --git a/installers/charm/osm-nbi/LICENSE b/installers/charm/osm-nbi/LICENSE
deleted file mode 100644
index 7e9d5046..00000000
--- a/installers/charm/osm-nbi/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2022 Canonical Ltd.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/installers/charm/osm-nbi/README.md b/installers/charm/osm-nbi/README.md
deleted file mode 100644
index 5cff9bf4..00000000
--- a/installers/charm/osm-nbi/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-
-
-# OSM NBI
-
-Charmhub package name: osm-nbi
-More information: https://charmhub.io/osm-nbi
-
-## Other resources
-
-* [Read more](https://osm.etsi.org/docs/user-guide/latest/)
-
-* [Contributing](https://osm.etsi.org/gitweb/?p=osm/devops.git;a=blob;f=installers/charm/osm-nbi/CONTRIBUTING.md)
-
-* See the [Juju SDK documentation](https://juju.is/docs/sdk) for more information about developing and improving charms.
-
diff --git a/installers/charm/osm-nbi/actions.yaml b/installers/charm/osm-nbi/actions.yaml
deleted file mode 100644
index 0d73468f..00000000
--- a/installers/charm/osm-nbi/actions.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Actions tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-get-debug-mode-information:
- description: Get information to debug the container
diff --git a/installers/charm/osm-nbi/charmcraft.yaml b/installers/charm/osm-nbi/charmcraft.yaml
deleted file mode 100644
index 3fce6d04..00000000
--- a/installers/charm/osm-nbi/charmcraft.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-
-type: charm
-bases:
- - build-on:
- - name: "ubuntu"
- channel: "22.04"
- run-on:
- - name: "ubuntu"
- channel: "22.04"
-
-parts:
- charm:
- build-packages:
- - git
- prime:
- - files/*
diff --git a/installers/charm/osm-nbi/config.yaml b/installers/charm/osm-nbi/config.yaml
deleted file mode 100644
index d2c8c628..00000000
--- a/installers/charm/osm-nbi/config.yaml
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Configure tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-options:
- log-level:
- default: "INFO"
- description: |
- Set the Logging Level.
-
- Options:
- - TRACE
- - DEBUG
- - INFO
- - WARN
- - ERROR
- - FATAL
- type: string
- database-commonkey:
- description: Database COMMON KEY
- type: string
- default: osm
-
- # Ingress options
- external-hostname:
- default: ""
- description: |
- The URL that will be configured in the Kubernetes ingress.
-
- The easiest way of configuring the external-hostname without having DNS set up is to use
- a wildcard DNS service like nip.io, constructing the URL like so:
- - nbi.127.0.0.1.nip.io (valid within the K8s cluster node)
- - nbi.<k8s-node-ip>.nip.io (valid from outside the K8s cluster node)
-
- This option is only applicable when the Kubernetes cluster has nginx ingress configured
- and the charm is related to the nginx-ingress-integrator.
- See more: https://charmhub.io/nginx-ingress-integrator
- type: string
- max-body-size:
- default: 20
- description: Max allowed body-size (for file uploads) in megabytes, set to 0 to
- disable limits.
- type: int
- tls-secret-name:
- description: TLS secret name to use for ingress.
- type: string
-
- # Debug-mode options
- debug-mode:
- type: boolean
- description: |
- Great for OSM Developers! (Not recommended for production deployments)
-
- This option activates debug mode, which sets up the container to be ready for debugging.
- As part of the setup, SSH is enabled and a VSCode workspace file is automatically populated.
-
- After enabling the debug-mode, execute the following command to get the information you need
- to start debugging:
- `juju run-action get-debug-mode-information --wait`
-
- The previous command returns the command you need to execute, and the SSH password that was set.
-
- See also:
- - https://charmhub.io/osm-nbi/configure#nbi-hostpath
- - https://charmhub.io/osm-nbi/configure#common-hostpath
- default: false
- nbi-hostpath:
- type: string
- description: |
- Set this config to the local path of the NBI module to persist the changes done during the
- debug-mode session.
-
- Example:
- $ git clone "https://osm.etsi.org/gerrit/osm/NBI" /home/ubuntu/NBI
- $ juju config nbi nbi-hostpath=/home/ubuntu/NBI
-
- This configuration only applies if option `debug-mode` is set to true.
-
- common-hostpath:
- type: string
- description: |
- Set this config to the local path of the common module to persist the changes done during the
- debug-mode session.
-
- Example:
- $ git clone "https://osm.etsi.org/gerrit/osm/common" /home/ubuntu/common
- $ juju config nbi common-hostpath=/home/ubuntu/common
-
- This configuration only applies if option `debug-mode` is set to true.
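The options in the config.yaml above are read by the charm through the standard ops configuration API. The following is a minimal sketch, not the deleted charm's actual code, of how a charm might consume `log-level`, `database-commonkey`, and `debug-mode`; the `OsmNbiCharm` class name and the logger wiring are illustrative assumptions.

```python
# Illustrative sketch only (not the deleted charm's code): reading the options
# defined in config.yaml above through the ops framework's config API.
import logging

from ops.charm import CharmBase
from ops.main import main

logger = logging.getLogger(__name__)


class OsmNbiCharm(CharmBase):  # hypothetical class name for illustration
    def __init__(self, *args):
        super().__init__(*args)
        self.framework.observe(self.on.config_changed, self._on_config_changed)

    def _on_config_changed(self, event) -> None:
        # Option names match the keys under `options:` in config.yaml.
        log_level = self.config["log-level"]            # e.g. "INFO" or "DEBUG"
        common_key = self.config["database-commonkey"]  # defaults to "osm"
        debug_mode = self.config["debug-mode"]          # boolean, defaults to False
        # TRACE is not a standard Python logging level, so fall back to INFO.
        logger.setLevel(getattr(logging, log_level, logging.INFO))
        logger.debug("database-commonkey=%s debug-mode=%s", common_key, debug_mode)


if __name__ == "__main__":
    main(OsmNbiCharm)
```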
diff --git a/installers/charm/osm-nbi/files/vscode-workspace.json b/installers/charm/osm-nbi/files/vscode-workspace.json
deleted file mode 100644
index f2baa1d1..00000000
--- a/installers/charm/osm-nbi/files/vscode-workspace.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
- "folders": [
- {
- "path": "/usr/lib/python3/dist-packages/osm_nbi"
- },
- {
- "path": "/usr/lib/python3/dist-packages/osm_common"
- },
- {
- "path": "/usr/lib/python3/dist-packages/osm_im"
- }
- ],
- "settings": {},
- "launch": {
- "version": "0.2.0",
- "configurations": [
- {
- "name": "NBI",
- "type": "python",
- "request": "launch",
- "module": "osm_nbi.nbi",
- "justMyCode": false,
- }
- ]
- }
-}
\ No newline at end of file
diff --git a/installers/charm/osm-nbi/lib/charms/data_platform_libs/v0/data_interfaces.py b/installers/charm/osm-nbi/lib/charms/data_platform_libs/v0/data_interfaces.py
deleted file mode 100644
index b3da5aa4..00000000
--- a/installers/charm/osm-nbi/lib/charms/data_platform_libs/v0/data_interfaces.py
+++ /dev/null
@@ -1,1130 +0,0 @@
-# Copyright 2023 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Library to manage the relation for the data-platform products.
-
-This library contains the Requires and Provides classes for handling the relation
-between an application and multiple managed applications supported by the data team:
-MySQL, Postgresql, MongoDB, Redis, and Kafka.
-
-### Database (MySQL, Postgresql, MongoDB, and Redis)
-
-#### Requires Charm
-This library is a uniform interface to a selection of common database
-metadata, with added custom events that add convenience to database management,
-and methods to consume the application-related data.
-
-
-The following is an example of using the DatabaseCreatedEvent in the context of the
-application charm code:
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- DatabaseCreatedEvent,
- DatabaseRequires,
-)
-
-class ApplicationCharm(CharmBase):
- # Application charm that connects to database charms.
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Charm events defined in the database requires charm library.
- self.database = DatabaseRequires(self, relation_name="database", database_name="database")
- self.framework.observe(self.database.on.database_created, self._on_database_created)
-
- def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
-
- # Start application with rendered configuration
- self._start_application(config_file)
-
- # Set active status
- self.unit.status = ActiveStatus("received database credentials")
-```
-
-As shown above, the library provides some custom events to handle specific situations,
-which are listed below:
-
-- database_created: event emitted when the requested database is created.
-- endpoints_changed: event emitted when the read/write endpoints of the database have changed.
-- read_only_endpoints_changed: event emitted when the read-only endpoints of the database
- have changed. Event is not triggered if read/write endpoints changed too.
-
-If you need to connect multiple database clusters to the same relation endpoint,
-the application charm can implement the same code as if it were connecting to a
-single database cluster (as in the code example above).
-
-To differentiate multiple clusters connected to the same relation endpoint
-the application charm can use the name of the remote application:
-
-```python
-
-def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Get the remote app name of the cluster that triggered this event
- cluster = event.relation.app.name
-```
-
-It is also possible to provide an alias for each different database cluster/relation.
-
-So, it is possible to differentiate the clusters in two ways.
-The first is to use the remote application name, i.e., `event.relation.app.name`, as above.
-
-The second way is to use different event handlers to handle each cluster events.
-The implementation would be something like the following code:
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- DatabaseCreatedEvent,
- DatabaseRequires,
-)
-
-class ApplicationCharm(CharmBase):
- # Application charm that connects to database charms.
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Define the cluster aliases and one handler for each cluster database created event.
- self.database = DatabaseRequires(
- self,
- relation_name="database",
- database_name="database",
- relations_aliases = ["cluster1", "cluster2"],
- )
- self.framework.observe(
- self.database.on.cluster1_database_created, self._on_cluster1_database_created
- )
- self.framework.observe(
- self.database.on.cluster2_database_created, self._on_cluster2_database_created
- )
-
- def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database on the cluster named cluster1
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
- ...
-
- def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database on the cluster named cluster2
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
- ...
-
-```
-
-### Provider Charm
-
-The following is an example of using the DatabaseRequestedEvent in the context of the
-database charm code:
-
-```python
-from charms.data_platform_libs.v0.data_interfaces import DatabaseProvides
-
-class SampleCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- # Charm events defined in the database provides charm library.
- self.provided_database = DatabaseProvides(self, relation_name="database")
- self.framework.observe(self.provided_database.on.database_requested,
- self._on_database_requested)
- # Database generic helper
- self.database = DatabaseHelper()
-
- def _on_database_requested(self, event: DatabaseRequestedEvent) -> None:
- # Handle the event triggered by a new database requested in the relation
- # Retrieve the database name using the charm library.
- db_name = event.database
- # generate a new user credential
- username = self.database.generate_user()
- password = self.database.generate_password()
- # set the credentials for the relation
- self.provided_database.set_credentials(event.relation.id, username, password)
- # set other variables for the relation event.set_tls("False")
-```
-As shown above, the library provides a custom event (database_requested) to handle
-the situation when an application charm requests a new database to be created.
-It is preferred to subscribe to this event instead of the relation-changed event, to avoid
-creating a new database when information other than a database name is
-exchanged in the relation databag.
-
-### Kafka
-
-This library is the interface to use and interact with the Kafka charm. This library contains
-custom events that add convenience to manage Kafka, and provides methods to consume the
-application related data.
-
-#### Requirer Charm
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- BootstrapServerChangedEvent,
- KafkaRequires,
- TopicCreatedEvent,
-)
-
-class ApplicationCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- self.kafka = KafkaRequires(self, "kafka_client", "test-topic")
- self.framework.observe(
- self.kafka.on.bootstrap_server_changed, self._on_kafka_bootstrap_server_changed
- )
- self.framework.observe(
- self.kafka.on.topic_created, self._on_kafka_topic_created
- )
-
- def _on_kafka_bootstrap_server_changed(self, event: BootstrapServerChangedEvent):
- # Event triggered when a bootstrap server was changed for this application
-
- new_bootstrap_server = event.bootstrap_server
- ...
-
- def _on_kafka_topic_created(self, event: TopicCreatedEvent):
- # Event triggered when a topic was created for this application
- username = event.username
- password = event.password
- tls = event.tls
- tls_ca = event.tls_ca
- bootstrap_server = event.bootstrap_server
- consumer_group_prefix = event.consumer_group_prefix
- zookeeper_uris = event.zookeeper_uris
- ...
-
-```
-
-As shown above, the library provides some custom events to handle specific situations,
-which are listed below:
-
-- topic_created: event emitted when the requested topic is created.
-- bootstrap_server_changed: event emitted when the bootstrap server has changed.
-- credential_changed: event emitted when the credentials of Kafka changed.
-
-### Provider Charm
-
-Following the previous example, this is an example of the provider charm.
-
-```python
-from charms.data_platform_libs.v0.data_interfaces import (
-    KafkaProvides,
-    TopicRequestedEvent,
-)
-
-class SampleCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Default charm events.
- self.framework.observe(self.on.start, self._on_start)
-
- # Charm events defined in the Kafka Provides charm library.
- self.kafka_provider = KafkaProvides(self, relation_name="kafka_client")
- self.framework.observe(self.kafka_provider.on.topic_requested, self._on_topic_requested)
- # Kafka generic helper
- self.kafka = KafkaHelper()
-
- def _on_topic_requested(self, event: TopicRequestedEvent):
- # Handle the on_topic_requested event.
-
- topic = event.topic
- relation_id = event.relation.id
- # set connection info in the databag relation
- self.kafka_provider.set_bootstrap_server(relation_id, self.kafka.get_bootstrap_server())
- self.kafka_provider.set_credentials(relation_id, username=username, password=password)
- self.kafka_provider.set_consumer_group_prefix(relation_id, ...)
- self.kafka_provider.set_tls(relation_id, "False")
- self.kafka_provider.set_zookeeper_uris(relation_id, ...)
-
-```
-As shown above, the library provides a custom event (topic_requested) to handle
-the situation when an application charm requests a new topic to be created.
-It is preferred to subscribe to this event instead of the relation-changed event, to avoid
-creating a new topic when information other than a topic name is
-exchanged in the relation databag.
-"""
-
-import json
-import logging
-from abc import ABC, abstractmethod
-from collections import namedtuple
-from datetime import datetime
-from typing import List, Optional
-
-from ops.charm import (
- CharmBase,
- CharmEvents,
- RelationChangedEvent,
- RelationEvent,
- RelationJoinedEvent,
-)
-from ops.framework import EventSource, Object
-from ops.model import Relation
-
-# The unique Charmhub library identifier, never change it
-LIBID = "6c3e6b6680d64e9c89e611d1a15f65be"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 7
-
-PYDEPS = ["ops>=2.0.0"]
-
-logger = logging.getLogger(__name__)
-
-Diff = namedtuple("Diff", "added changed deleted")
-Diff.__doc__ = """
-A tuple for storing the diff between two data mappings.
-
-added - keys that were added
-changed - keys that still exist but have new values
-deleted - keys that were deleted"""
-
-
-def diff(event: RelationChangedEvent, bucket: str) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
- bucket: bucket of the databag (app or unit)
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- # Retrieve the old data from the data key in the application relation databag.
- old_data = json.loads(event.relation.data[bucket].get("data", "{}"))
- # Retrieve the new data from the event relation databag.
- new_data = {
- key: value for key, value in event.relation.data[event.app].items() if key != "data"
- }
-
- # These are the keys that were added to the databag and triggered this event.
- added = new_data.keys() - old_data.keys()
- # These are the keys that were removed from the databag and triggered this event.
- deleted = old_data.keys() - new_data.keys()
- # These are the keys that already existed in the databag,
- # but had their values changed.
- changed = {key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key]}
- # Convert the new_data to a serializable format and save it for a next diff check.
- event.relation.data[bucket].update({"data": json.dumps(new_data)})
-
- # Return the diff with all possible changes.
- return Diff(added, changed, deleted)
-
-
-# Base DataProvides and DataRequires
-
-
-class DataProvides(Object, ABC):
- """Base provides-side of the data products relation."""
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
- self.charm = charm
- self.local_app = self.charm.model.app
- self.local_unit = self.charm.unit
- self.relation_name = relation_name
- self.framework.observe(
- charm.on[relation_name].relation_changed,
- self._on_relation_changed,
- )
-
- def _diff(self, event: RelationChangedEvent) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- return diff(event, self.local_app)
-
- @abstractmethod
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation data has changed."""
- raise NotImplementedError
-
- def fetch_relation_data(self) -> dict:
- """Retrieves data from relation.
-
- This function can be used to retrieve data from a relation
- in the charm code when outside an event callback.
-
- Returns:
- a dict of the values stored in the relation data bag
- for all relation instances (indexed by the relation id).
- """
- data = {}
- for relation in self.relations:
- data[relation.id] = {
- key: value for key, value in relation.data[relation.app].items() if key != "data"
- }
- return data
-
- def _update_relation_data(self, relation_id: int, data: dict) -> None:
- """Updates a set of key-value pairs in the relation.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- data: dict containing the key-value pairs
- that should be updated in the relation.
- """
- if self.local_unit.is_leader():
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_app].update(data)
-
- @property
- def relations(self) -> List[Relation]:
- """The list of Relation instances associated with this relation_name."""
- return list(self.charm.model.relations[self.relation_name])
-
- def set_credentials(self, relation_id: int, username: str, password: str) -> None:
- """Set credentials.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- username: user that was created.
- password: password of the created user.
- """
- self._update_relation_data(
- relation_id,
- {
- "username": username,
- "password": password,
- },
- )
-
- def set_tls(self, relation_id: int, tls: str) -> None:
- """Set whether TLS is enabled.
-
- Args:
- relation_id: the identifier for a particular relation.
- tls: whether tls is enabled (True or False).
- """
- self._update_relation_data(relation_id, {"tls": tls})
-
- def set_tls_ca(self, relation_id: int, tls_ca: str) -> None:
- """Set the TLS CA in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- tls_ca: TLS certification authority.
- """
- self._update_relation_data(relation_id, {"tls_ca": tls_ca})
-
-
-class DataRequires(Object, ABC):
- """Requires-side of the relation."""
-
- def __init__(
- self,
- charm,
- relation_name: str,
- extra_user_roles: str = None,
- ):
- """Manager of base client relations."""
- super().__init__(charm, relation_name)
- self.charm = charm
- self.extra_user_roles = extra_user_roles
- self.local_app = self.charm.model.app
- self.local_unit = self.charm.unit
- self.relation_name = relation_name
- self.framework.observe(
- self.charm.on[relation_name].relation_joined, self._on_relation_joined_event
- )
- self.framework.observe(
- self.charm.on[relation_name].relation_changed, self._on_relation_changed_event
- )
-
- @abstractmethod
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the relation."""
- raise NotImplementedError
-
- @abstractmethod
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- raise NotImplementedError
-
- def fetch_relation_data(self) -> dict:
- """Retrieves data from relation.
-
- This function can be used to retrieve data from a relation
- in the charm code when outside an event callback.
- Function cannot be used in `*-relation-broken` events and will raise an exception.
-
- Returns:
- a dict of the values stored in the relation data bag
- for all relation instances (indexed by the relation ID).
- """
- data = {}
- for relation in self.relations:
- data[relation.id] = {
- key: value for key, value in relation.data[relation.app].items() if key != "data"
- }
- return data
-
- def _update_relation_data(self, relation_id: int, data: dict) -> None:
- """Updates a set of key-value pairs in the relation.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- data: dict containing the key-value pairs
- that should be updated in the relation.
- """
- if self.local_unit.is_leader():
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_app].update(data)
-
- def _diff(self, event: RelationChangedEvent) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- return diff(event, self.local_unit)
-
- @property
- def relations(self) -> List[Relation]:
- """The list of Relation instances associated with this relation_name."""
- return [
- relation
- for relation in self.charm.model.relations[self.relation_name]
- if self._is_relation_active(relation)
- ]
-
- @staticmethod
- def _is_relation_active(relation: Relation):
- try:
- _ = repr(relation.data)
- return True
- except RuntimeError:
- return False
-
- @staticmethod
- def _is_resource_created_for_relation(relation: Relation):
- return (
- "username" in relation.data[relation.app] and "password" in relation.data[relation.app]
- )
-
- def is_resource_created(self, relation_id: Optional[int] = None) -> bool:
- """Check if the resource has been created.
-
- This function can be used to check if the Provider answered with data in the charm code
- when outside an event callback.
-
- Args:
- relation_id (int, optional): When provided the check is done only for the relation id
- provided, otherwise the check is done for all relations
-
- Returns:
- True or False
-
- Raises:
- IndexError: If relation_id is provided but that relation does not exist
- """
- if relation_id is not None:
- try:
- relation = [relation for relation in self.relations if relation.id == relation_id][
- 0
- ]
- return self._is_resource_created_for_relation(relation)
- except IndexError:
- raise IndexError(f"relation id {relation_id} cannot be accessed")
- else:
- return (
- all(
- [
- self._is_resource_created_for_relation(relation)
- for relation in self.relations
- ]
- )
- if self.relations
- else False
- )
-
-
-# General events
-
-
-class ExtraRoleEvent(RelationEvent):
- """Base class for data events."""
-
- @property
- def extra_user_roles(self) -> Optional[str]:
- """Returns the extra user roles that were requested."""
- return self.relation.data[self.relation.app].get("extra-user-roles")
-
-
-class AuthenticationEvent(RelationEvent):
- """Base class for authentication fields for events."""
-
- @property
- def username(self) -> Optional[str]:
- """Returns the created username."""
- return self.relation.data[self.relation.app].get("username")
-
- @property
- def password(self) -> Optional[str]:
- """Returns the password for the created user."""
- return self.relation.data[self.relation.app].get("password")
-
- @property
- def tls(self) -> Optional[str]:
- """Returns whether TLS is configured."""
- return self.relation.data[self.relation.app].get("tls")
-
- @property
- def tls_ca(self) -> Optional[str]:
- """Returns TLS CA."""
- return self.relation.data[self.relation.app].get("tls-ca")
-
-
-# Database related events and fields
-
-
-class DatabaseProvidesEvent(RelationEvent):
- """Base class for database events."""
-
- @property
- def database(self) -> Optional[str]:
- """Returns the database that was requested."""
- return self.relation.data[self.relation.app].get("database")
-
-
-class DatabaseRequestedEvent(DatabaseProvidesEvent, ExtraRoleEvent):
- """Event emitted when a new database is requested for use on this relation."""
-
-
-class DatabaseProvidesEvents(CharmEvents):
- """Database events.
-
- This class defines the events that the database can emit.
- """
-
- database_requested = EventSource(DatabaseRequestedEvent)
-
-
-class DatabaseRequiresEvent(RelationEvent):
- """Base class for database events."""
-
- @property
- def endpoints(self) -> Optional[str]:
- """Returns a comma separated list of read/write endpoints."""
- return self.relation.data[self.relation.app].get("endpoints")
-
- @property
- def read_only_endpoints(self) -> Optional[str]:
- """Returns a comma separated list of read only endpoints."""
- return self.relation.data[self.relation.app].get("read-only-endpoints")
-
- @property
- def replset(self) -> Optional[str]:
- """Returns the replicaset name.
-
- MongoDB only.
- """
- return self.relation.data[self.relation.app].get("replset")
-
- @property
- def uris(self) -> Optional[str]:
- """Returns the connection URIs.
-
- MongoDB, Redis, OpenSearch.
- """
- return self.relation.data[self.relation.app].get("uris")
-
- @property
- def version(self) -> Optional[str]:
- """Returns the version of the database.
-
- Version as informed by the database daemon.
- """
- return self.relation.data[self.relation.app].get("version")
-
-
-class DatabaseCreatedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when a new database is created for use on this relation."""
-
-
-class DatabaseEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when the read/write endpoints are changed."""
-
-
-class DatabaseReadOnlyEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when the read only endpoints are changed."""
-
-
-class DatabaseRequiresEvents(CharmEvents):
- """Database events.
-
- This class defines the events that the database can emit.
- """
-
- database_created = EventSource(DatabaseCreatedEvent)
- endpoints_changed = EventSource(DatabaseEndpointsChangedEvent)
- read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent)
-
-
-# Database Provider and Requires
-
-
-class DatabaseProvides(DataProvides):
- """Provider-side of the database relations."""
-
- on = DatabaseProvidesEvents()
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
-
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation has changed."""
- # Only the leader should handle this event.
- if not self.local_unit.is_leader():
- return
-
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Emit a database requested event if the setup key (database name and optional
- # extra user roles) was added to the relation databag by the application.
- if "database" in diff.added:
- self.on.database_requested.emit(event.relation, app=event.app, unit=event.unit)
-
- def set_endpoints(self, relation_id: int, connection_strings: str) -> None:
- """Set database primary connections.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- connection_strings: database hosts and ports comma separated list.
- """
- self._update_relation_data(relation_id, {"endpoints": connection_strings})
-
- def set_read_only_endpoints(self, relation_id: int, connection_strings: str) -> None:
- """Set database replicas connection strings.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- connection_strings: database hosts and ports comma separated list.
- """
- self._update_relation_data(relation_id, {"read-only-endpoints": connection_strings})
-
- def set_replset(self, relation_id: int, replset: str) -> None:
- """Set replica set name in the application relation databag.
-
- MongoDB only.
-
- Args:
- relation_id: the identifier for a particular relation.
- replset: replica set name.
- """
- self._update_relation_data(relation_id, {"replset": replset})
-
- def set_uris(self, relation_id: int, uris: str) -> None:
- """Set the database connection URIs in the application relation databag.
-
- MongoDB, Redis, and OpenSearch only.
-
- Args:
- relation_id: the identifier for a particular relation.
- uris: connection URIs.
- """
- self._update_relation_data(relation_id, {"uris": uris})
-
- def set_version(self, relation_id: int, version: str) -> None:
- """Set the database version in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- version: database version.
- """
- self._update_relation_data(relation_id, {"version": version})
-
-
-class DatabaseRequires(DataRequires):
- """Requires-side of the database relation."""
-
- on = DatabaseRequiresEvents()
-
- def __init__(
- self,
- charm,
- relation_name: str,
- database_name: str,
- extra_user_roles: str = None,
- relations_aliases: List[str] = None,
- ):
- """Manager of database client relations."""
- super().__init__(charm, relation_name, extra_user_roles)
- self.database = database_name
- self.relations_aliases = relations_aliases
-
- # Define custom event names for each alias.
- if relations_aliases:
- # Ensure the number of aliases matches the maximum number
- # of connections allowed in the specific relation.
- relation_connection_limit = self.charm.meta.requires[relation_name].limit
- if len(relations_aliases) != relation_connection_limit:
- raise ValueError(
- f"The number of aliases must match the maximum number of connections allowed in the relation. "
- f"Expected {relation_connection_limit}, got {len(relations_aliases)}"
- )
-
- for relation_alias in relations_aliases:
- self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent)
- self.on.define_event(
- f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent
- )
- self.on.define_event(
- f"{relation_alias}_read_only_endpoints_changed",
- DatabaseReadOnlyEndpointsChangedEvent,
- )
-
- def _assign_relation_alias(self, relation_id: int) -> None:
- """Assigns an alias to a relation.
-
- This function writes in the unit data bag.
-
- Args:
- relation_id: the identifier for a particular relation.
- """
- # If no aliases were provided, return immediately.
- if not self.relations_aliases:
- return
-
- # Return if an alias was already assigned to this relation
- # (like when there are more than one unit joining the relation).
- if (
- self.charm.model.get_relation(self.relation_name, relation_id)
- .data[self.local_unit]
- .get("alias")
- ):
- return
-
- # Retrieve the available aliases (the ones that weren't assigned to any relation).
- available_aliases = self.relations_aliases[:]
- for relation in self.charm.model.relations[self.relation_name]:
- alias = relation.data[self.local_unit].get("alias")
- if alias:
- logger.debug("Alias %s was already assigned to relation %d", alias, relation.id)
- available_aliases.remove(alias)
-
- # Set the alias in the unit relation databag of the specific relation.
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_unit].update({"alias": available_aliases[0]})
-
- def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None:
- """Emit an aliased event to a particular relation if it has an alias.
-
- Args:
- event: the relation changed event that was received.
- event_name: the name of the event to emit.
- """
- alias = self._get_relation_alias(event.relation.id)
- if alias:
- getattr(self.on, f"{alias}_{event_name}").emit(
- event.relation, app=event.app, unit=event.unit
- )
-
- def _get_relation_alias(self, relation_id: int) -> Optional[str]:
- """Returns the relation alias.
-
- Args:
- relation_id: the identifier for a particular relation.
-
- Returns:
- the relation alias or None if the relation was not found.
- """
- for relation in self.charm.model.relations[self.relation_name]:
- if relation.id == relation_id:
- return relation.data[self.local_unit].get("alias")
- return None
-
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the database relation."""
- # If relations aliases were provided, assign one to the relation.
- self._assign_relation_alias(event.relation.id)
-
- # Sets both database and extra user roles in the relation
- # if the roles are provided. Otherwise, sets only the database.
- if self.extra_user_roles:
- self._update_relation_data(
- event.relation.id,
- {
- "database": self.database,
- "extra-user-roles": self.extra_user_roles,
- },
- )
- else:
- self._update_relation_data(event.relation.id, {"database": self.database})
-
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- """Event emitted when the database relation has changed."""
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Check if the database is created
- # (the database charm shared the credentials).
- if "username" in diff.added and "password" in diff.added:
- # Emit the default event (the one without an alias).
- logger.info("database created at %s", datetime.now())
- self.on.database_created.emit(event.relation, app=event.app, unit=event.unit)
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "database_created")
-
- # To avoid unnecessary application restarts do not trigger
- # "endpoints_changed" event if "database_created" is triggered.
- return
-
- # Emit an endpoints changed event if the database
- # added or changed this info in the relation databag.
- if "endpoints" in diff.added or "endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("endpoints changed on %s", datetime.now())
- self.on.endpoints_changed.emit(event.relation, app=event.app, unit=event.unit)
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "endpoints_changed")
-
- # To avoid unnecessary application restarts do not trigger
- # "read_only_endpoints_changed" event if "endpoints_changed" is triggered.
- return
-
- # Emit a read only endpoints changed event if the database
- # added or changed this info in the relation databag.
- if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("read-only-endpoints changed on %s", datetime.now())
- self.on.read_only_endpoints_changed.emit(
- event.relation, app=event.app, unit=event.unit
- )
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "read_only_endpoints_changed")
-
-
-# Kafka related events
-
-
-class KafkaProvidesEvent(RelationEvent):
- """Base class for Kafka events."""
-
- @property
- def topic(self) -> Optional[str]:
- """Returns the topic that was requested."""
- return self.relation.data[self.relation.app].get("topic")
-
-
-class TopicRequestedEvent(KafkaProvidesEvent, ExtraRoleEvent):
- """Event emitted when a new topic is requested for use on this relation."""
-
-
-class KafkaProvidesEvents(CharmEvents):
- """Kafka events.
-
- This class defines the events that Kafka can emit.
- """
-
- topic_requested = EventSource(TopicRequestedEvent)
-
-
-class KafkaRequiresEvent(RelationEvent):
- """Base class for Kafka events."""
-
- @property
- def bootstrap_server(self) -> Optional[str]:
- """Returns a a comma-seperated list of broker uris."""
- return self.relation.data[self.relation.app].get("endpoints")
-
- @property
- def consumer_group_prefix(self) -> Optional[str]:
- """Returns the consumer-group-prefix."""
- return self.relation.data[self.relation.app].get("consumer-group-prefix")
-
- @property
- def zookeeper_uris(self) -> Optional[str]:
- """Returns a comma separated list of Zookeeper uris."""
- return self.relation.data[self.relation.app].get("zookeeper-uris")
-
-
-class TopicCreatedEvent(AuthenticationEvent, KafkaRequiresEvent):
- """Event emitted when a new topic is created for use on this relation."""
-
-
-class BootstrapServerChangedEvent(AuthenticationEvent, KafkaRequiresEvent):
- """Event emitted when the bootstrap server is changed."""
-
-
-class KafkaRequiresEvents(CharmEvents):
- """Kafka events.
-
- This class defines the events that Kafka can emit.
- """
-
- topic_created = EventSource(TopicCreatedEvent)
- bootstrap_server_changed = EventSource(BootstrapServerChangedEvent)
-
-
-# Kafka Provides and Requires
-
-
-class KafkaProvides(DataProvides):
- """Provider-side of the Kafka relation."""
-
- on = KafkaProvidesEvents()
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
-
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation has changed."""
- # Only the leader should handle this event.
- if not self.local_unit.is_leader():
- return
-
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Emit a topic requested event if the setup key (topic name and optional
- # extra user roles) was added to the relation databag by the application.
- if "topic" in diff.added:
- self.on.topic_requested.emit(event.relation, app=event.app, unit=event.unit)
-
- def set_bootstrap_server(self, relation_id: int, bootstrap_server: str) -> None:
- """Set the bootstrap server in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- bootstrap_server: the bootstrap server address.
- """
- self._update_relation_data(relation_id, {"endpoints": bootstrap_server})
-
- def set_consumer_group_prefix(self, relation_id: int, consumer_group_prefix: str) -> None:
- """Set the consumer group prefix in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- consumer_group_prefix: the consumer group prefix string.
- """
- self._update_relation_data(relation_id, {"consumer-group-prefix": consumer_group_prefix})
-
- def set_zookeeper_uris(self, relation_id: int, zookeeper_uris: str) -> None:
- """Set the zookeeper uris in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- zookeeper_uris: comma-separated list of ZooKeeper server URIs.
- """
- self._update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris})
-
-
-class KafkaRequires(DataRequires):
- """Requires-side of the Kafka relation."""
-
- on = KafkaRequiresEvents()
-
- def __init__(self, charm, relation_name: str, topic: str, extra_user_roles: str = None):
- """Manager of Kafka client relations."""
- # super().__init__(charm, relation_name)
- super().__init__(charm, relation_name, extra_user_roles)
- self.charm = charm
- self.topic = topic
-
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the Kafka relation."""
- # Sets both topic and extra user roles in the relation
- # if the roles are provided. Otherwise, sets only the topic.
- self._update_relation_data(
- event.relation.id,
- {
- "topic": self.topic,
- "extra-user-roles": self.extra_user_roles,
- }
- if self.extra_user_roles is not None
- else {"topic": self.topic},
- )
-
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- """Event emitted when the Kafka relation has changed."""
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Check if the topic is created
- # (the Kafka charm shared the credentials).
- if "username" in diff.added and "password" in diff.added:
- # Emit the default event (the one without an alias).
- logger.info("topic created at %s", datetime.now())
- self.on.topic_created.emit(event.relation, app=event.app, unit=event.unit)
-
- # To avoid unnecessary application restarts do not trigger
- # "endpoints_changed" event if "topic_created" is triggered.
- return
-
- # Emit an endpoints (bootstrap-server) changed event if the Kafka charm
- # added or changed this info in the relation databag.
- if "endpoints" in diff.added or "endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("endpoints changed on %s", datetime.now())
- self.on.bootstrap_server_changed.emit(
- event.relation, app=event.app, unit=event.unit
- ) # here check if this is the right design
- return
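Beyond the event-driven examples in the docstring above, the requirer side of this library also documents `fetch_relation_data()` and `is_resource_created()` for use outside event callbacks, but does not show them in action. The sketch below illustrates that usage under assumed names (the `ExampleCharm` class and the `database`/`osm` endpoint values); it is not code from this repository.

```python
# Minimal sketch (assumed charm and endpoint names): polling the requirer-side
# helpers documented above from an update-status hook instead of custom events.
from charms.data_platform_libs.v0.data_interfaces import DatabaseRequires
from ops.charm import CharmBase
from ops.model import ActiveStatus, WaitingStatus


class ExampleCharm(CharmBase):  # hypothetical charm for illustration
    def __init__(self, *args):
        super().__init__(*args)
        self.database = DatabaseRequires(self, relation_name="database", database_name="osm")
        self.framework.observe(self.on.update_status, self._on_update_status)

    def _on_update_status(self, event) -> None:
        # is_resource_created() is True only when every related cluster has shared
        # both "username" and "password" in its application databag.
        if not self.database.is_resource_created():
            self.unit.status = WaitingStatus("waiting for database credentials")
            return
        # fetch_relation_data() returns {relation_id: {key: value, ...}} with the
        # provider-side fields (endpoints, uris, version, ...), excluding "data".
        relation_data = self.database.fetch_relation_data()
        endpoints = [
            data.get("endpoints") or data.get("uris", "")
            for data in relation_data.values()
        ]
        self.unit.status = ActiveStatus(f"connected to: {', '.join(endpoints)}")
```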
diff --git a/installers/charm/osm-nbi/lib/charms/kafka_k8s/v0/kafka.py b/installers/charm/osm-nbi/lib/charms/kafka_k8s/v0/kafka.py
deleted file mode 100644
index aeb5edcb..00000000
--- a/installers/charm/osm-nbi/lib/charms/kafka_k8s/v0/kafka.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Kafka library.
-
-This [library](https://juju.is/docs/sdk/libraries) implements both sides of the
-`kafka` [interface](https://juju.is/docs/sdk/relations).
-
-The *provider* side of this interface is implemented by the
-[kafka-k8s Charmed Operator](https://charmhub.io/kafka-k8s).
-
-Any Charmed Operator that *requires* Kafka for providing its
-service should implement the *requirer* side of this interface.
-
-In a nutshell, using this library to implement a Charmed Operator *requiring*
-Kafka would look like
-
-```
-$ charmcraft fetch-lib charms.kafka_k8s.v0.kafka
-```
-
-`metadata.yaml`:
-
-```
-requires:
- kafka:
- interface: kafka
- limit: 1
-```
-
-`src/charm.py`:
-
-```
-from charms.kafka_k8s.v0.kafka import KafkaEvents, KafkaRequires
-from ops.charm import CharmBase
-from ops.model import BlockedStatus
-
-
-class MyCharm(CharmBase):
-
- on = KafkaEvents()
-
- def __init__(self, *args):
- super().__init__(*args)
- self.kafka = KafkaRequires(self)
- self.framework.observe(
- self.on.kafka_available,
- self._on_kafka_available,
- )
- self.framework.observe(
- self.on["kafka"].relation_broken,
- self._on_kafka_broken,
- )
-
- def _on_kafka_available(self, event):
- # Get Kafka host and port
- host: str = self.kafka.host
- port: int = self.kafka.port
- # host => "kafka-k8s"
- # port => 9092
-
- def _on_kafka_broken(self, event):
- # Stop service
- # ...
- self.unit.status = BlockedStatus("need kafka relation")
-```
-
-You can file bugs
-[here](https://github.com/charmed-osm/kafka-k8s-operator/issues)!
-"""
-
-from typing import Optional
-
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, Object
-
-# The unique Charmhub library identifier, never change it
-from ops.model import Relation
-
-LIBID = "eacc8c85082347c9aae740e0220b8376"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 4
-
-
-KAFKA_HOST_APP_KEY = "host"
-KAFKA_PORT_APP_KEY = "port"
-
-
-class _KafkaAvailableEvent(EventBase):
- """Event emitted when Kafka is available."""
-
-
-class KafkaEvents(CharmEvents):
- """Kafka events.
-
- This class defines the events that Kafka can emit.
-
- Events:
- kafka_available (_KafkaAvailableEvent)
- """
-
- kafka_available = EventSource(_KafkaAvailableEvent)
-
-
-class KafkaRequires(Object):
- """Requires-side of the Kafka relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "kafka") -> None:
- super().__init__(charm, endpoint_name)
- self.charm = charm
- self._endpoint_name = endpoint_name
-
- # Observe relation events
- event_observe_mapping = {
- charm.on[self._endpoint_name].relation_changed: self._on_relation_changed,
- }
- for event, observer in event_observe_mapping.items():
- self.framework.observe(event, observer)
-
- def _on_relation_changed(self, event) -> None:
- if event.relation.app and all(
- key in event.relation.data[event.relation.app]
- for key in (KAFKA_HOST_APP_KEY, KAFKA_PORT_APP_KEY)
- ):
- self.charm.on.kafka_available.emit()
-
- @property
- def host(self) -> str:
- """Get kafka hostname."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- relation.data[relation.app].get(KAFKA_HOST_APP_KEY)
- if relation and relation.app
- else None
- )
-
- @property
- def port(self) -> int:
- """Get kafka port number."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- int(relation.data[relation.app].get(KAFKA_PORT_APP_KEY))
- if relation and relation.app
- else None
- )
-
-
-class KafkaProvides(Object):
- """Provides-side of the Kafka relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "kafka") -> None:
- super().__init__(charm, endpoint_name)
- self._endpoint_name = endpoint_name
-
- def set_host_info(self, host: str, port: int, relation: Optional[Relation] = None) -> None:
- """Set Kafka host and port.
-
- This function writes in the application data of the relation, therefore,
- only the unit leader can call it.
-
- Args:
- host (str): Kafka hostname or IP address.
- port (int): Kafka port.
- relation (Optional[Relation]): Relation to update.
- If not specified, all relations will be updated.
-
- Raises:
- Exception: if a non-leader unit calls this function.
- """
- if not self.model.unit.is_leader():
- raise Exception("only the leader set host information.")
-
- if relation:
- self._update_relation_data(host, port, relation)
- return
-
- for relation in self.model.relations[self._endpoint_name]:
- self._update_relation_data(host, port, relation)
-
- def _update_relation_data(self, host: str, port: int, relation: Relation) -> None:
- """Update data in relation if needed."""
- relation.data[self.model.app][KAFKA_HOST_APP_KEY] = host
- relation.data[self.model.app][KAFKA_PORT_APP_KEY] = str(port)
diff --git a/installers/charm/osm-nbi/lib/charms/nginx_ingress_integrator/v0/ingress.py b/installers/charm/osm-nbi/lib/charms/nginx_ingress_integrator/v0/ingress.py
deleted file mode 100644
index be2d762b..00000000
--- a/installers/charm/osm-nbi/lib/charms/nginx_ingress_integrator/v0/ingress.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-"""Library for the ingress relation.
-
-This library contains the Requires and Provides classes for handling
-the ingress interface.
-
-Import `IngressRequires` in your charm, with two required options:
- - "self" (the charm itself)
- - config_dict
-
-`config_dict` accepts the following keys:
- - service-hostname (required)
- - service-name (required)
- - service-port (required)
- - additional-hostnames
- - limit-rps
- - limit-whitelist
- - max-body-size
- - owasp-modsecurity-crs
- - path-routes
- - retry-errors
- - rewrite-enabled
- - rewrite-target
- - service-namespace
- - session-cookie-max-age
- - tls-secret-name
-
-See [the config section](https://charmhub.io/nginx-ingress-integrator/configure) for descriptions
-of each, along with the required type.
-
-As an example, add the following to `src/charm.py`:
-```
-from charms.nginx_ingress_integrator.v0.ingress import IngressRequires
-
-# In your charm's `__init__` method.
-self.ingress = IngressRequires(self, {"service-hostname": self.config["external_hostname"],
- "service-name": self.app.name,
- "service-port": 80})
-
-# In your charm's `config-changed` handler.
-self.ingress.update_config({"service-hostname": self.config["external_hostname"]})
-```
-And then add the following to `metadata.yaml`:
-```
-requires:
- ingress:
- interface: ingress
-```
-You _must_ register the IngressRequires class in your charm's `__init__` method
-rather than, for instance, in a config-changed event handler. An instance created
-later is not yet observing the relation and will therefore miss any
-relation-changed event that has already fired.
-"""
-
-import logging
-
-from ops.charm import CharmEvents
-from ops.framework import EventBase, EventSource, Object
-from ops.model import BlockedStatus
-
-# The unique Charmhub library identifier, never change it
-LIBID = "db0af4367506491c91663468fb5caa4c"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 10
-
-logger = logging.getLogger(__name__)
-
-REQUIRED_INGRESS_RELATION_FIELDS = {
- "service-hostname",
- "service-name",
- "service-port",
-}
-
-OPTIONAL_INGRESS_RELATION_FIELDS = {
- "additional-hostnames",
- "limit-rps",
- "limit-whitelist",
- "max-body-size",
- "owasp-modsecurity-crs",
- "path-routes",
- "retry-errors",
- "rewrite-target",
- "rewrite-enabled",
- "service-namespace",
- "session-cookie-max-age",
- "tls-secret-name",
-}
-
-
-class IngressAvailableEvent(EventBase):
- pass
-
-
-class IngressBrokenEvent(EventBase):
- pass
-
-
-class IngressCharmEvents(CharmEvents):
- """Custom charm events."""
-
- ingress_available = EventSource(IngressAvailableEvent)
- ingress_broken = EventSource(IngressBrokenEvent)
-
-
-class IngressRequires(Object):
- """This class defines the functionality for the 'requires' side of the 'ingress' relation.
-
- Hook events observed:
- - relation-changed
- """
-
- def __init__(self, charm, config_dict):
- super().__init__(charm, "ingress")
-
- self.framework.observe(charm.on["ingress"].relation_changed, self._on_relation_changed)
-
- self.config_dict = config_dict
-
- def _config_dict_errors(self, update_only=False):
- """Check our config dict for errors."""
- blocked_message = "Error in ingress relation, check `juju debug-log`"
- unknown = [
- x
- for x in self.config_dict
- if x not in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS
- ]
- if unknown:
- logger.error(
- "Ingress relation error, unknown key(s) in config dictionary found: %s",
- ", ".join(unknown),
- )
- self.model.unit.status = BlockedStatus(blocked_message)
- return True
- if not update_only:
- missing = [x for x in REQUIRED_INGRESS_RELATION_FIELDS if x not in self.config_dict]
- if missing:
- logger.error(
- "Ingress relation error, missing required key(s) in config dictionary: %s",
- ", ".join(sorted(missing)),
- )
- self.model.unit.status = BlockedStatus(blocked_message)
- return True
- return False
-
- def _on_relation_changed(self, event):
- """Handle the relation-changed event."""
- # `self.unit` isn't available here, so use `self.model.unit`.
- if self.model.unit.is_leader():
- if self._config_dict_errors():
- return
- for key in self.config_dict:
- event.relation.data[self.model.app][key] = str(self.config_dict[key])
-
- def update_config(self, config_dict):
- """Allow for updates to relation."""
- if self.model.unit.is_leader():
- self.config_dict = config_dict
- if self._config_dict_errors(update_only=True):
- return
- relation = self.model.get_relation("ingress")
- if relation:
- for key in self.config_dict:
- relation.data[self.model.app][key] = str(self.config_dict[key])
-
-
-class IngressProvides(Object):
- """This class defines the functionality for the 'provides' side of the 'ingress' relation.
-
- Hook events observed:
- - relation-changed
- """
-
- def __init__(self, charm):
- super().__init__(charm, "ingress")
- # Observe the relation-changed hook event and bind
- # self.on_relation_changed() to handle the event.
- self.framework.observe(charm.on["ingress"].relation_changed, self._on_relation_changed)
- self.framework.observe(charm.on["ingress"].relation_broken, self._on_relation_broken)
- self.charm = charm
-
- def _on_relation_changed(self, event):
- """Handle a change to the ingress relation.
-
- Confirm we have the fields we expect to receive."""
- # `self.unit` isn't available here, so use `self.model.unit`.
- if not self.model.unit.is_leader():
- return
-
- ingress_data = {
- field: event.relation.data[event.app].get(field)
- for field in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS
- }
-
- missing_fields = sorted(
- [
- field
- for field in REQUIRED_INGRESS_RELATION_FIELDS
- if ingress_data.get(field) is None
- ]
- )
-
- if missing_fields:
- logger.error(
- "Missing required data fields for ingress relation: {}".format(
- ", ".join(missing_fields)
- )
- )
- self.model.unit.status = BlockedStatus(
- "Missing fields for ingress: {}".format(", ".join(missing_fields))
- )
-            return
-
- # Create an event that our charm can use to decide it's okay to
- # configure the ingress.
- self.charm.on.ingress_available.emit()
-
- def _on_relation_broken(self, _):
- """Handle a relation-broken event in the ingress relation."""
- if not self.model.unit.is_leader():
- return
-
- # Create an event that our charm can use to remove the ingress resource.
- self.charm.on.ingress_broken.emit()
diff --git a/installers/charm/osm-nbi/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/installers/charm/osm-nbi/lib/charms/observability_libs/v1/kubernetes_service_patch.py
deleted file mode 100644
index 506dbf03..00000000
--- a/installers/charm/osm-nbi/lib/charms/observability_libs/v1/kubernetes_service_patch.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-
-"""# KubernetesServicePatch Library.
-
-This library is designed to enable developers to more simply patch the Kubernetes Service created
-by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a
-service named after the application in the namespace (named after the Juju model). This service by
-default contains a "placeholder" port, which is 65535/TCP.
-
-When modifying the default set of resources managed by Juju, one must consider the lifecycle of the
-charm. In this case, any modifications to the default service (created during deployment), will be
-overwritten during a charm upgrade.
-
-When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm`
-events which applies the patch to the cluster. This should ensure that the service ports are
-correct throughout the charm's life.
-
-The constructor simply takes a reference to the parent charm, and a list of
-[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the
-service. For information regarding the `lightkube` `ServicePort` model, please visit the
-`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport).
-
-Optionally, a name of the service (in case service name needs to be patched as well), labels,
-selectors, and annotations can be provided as keyword arguments.
-
-## Getting Started
-
-To get started using the library, you just need to fetch the library using `charmcraft`. **Note
-that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.**
-
-```shell
-cd some-charm
-charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch
-cat <<-EOF >> requirements.txt
-lightkube
-lightkube-models
-EOF
-```
-
-Then, to initialise the library:
-
-For `ClusterIP` services:
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- port = ServicePort(443, name=f"{self.app.name}")
- self.service_patcher = KubernetesServicePatch(self, [port])
- # ...
-```
-
-For `LoadBalancer`/`NodePort` services:
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666)
- self.service_patcher = KubernetesServicePatch(
-            self, [port], service_type="LoadBalancer"
- )
- # ...
-```
-
-Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"`
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP")
- udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP")
- sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP")
- self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp])
- # ...
-```
-
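-The optional keyword arguments can be used to customise the patched service.
-A minimal sketch (the service name, label, and annotation values below are
-illustrative assumptions, not defaults of this library):
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
-    def __init__(self, *args):
-        # ...
-        port = ServicePort(443, name=f"{self.app.name}")
-        self.service_patcher = KubernetesServicePatch(
-            self,
-            [port],
-            service_name="custom-service-name",
-            additional_labels={"app.kubernetes.io/part-of": "osm"},
-            additional_annotations={"prometheus.io/scrape": "true"},
-        )
-        # ...
-```
-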
-Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library
-does not try to make any API calls, or open any files during testing that are unlikely to be
-present, and could break your tests. The easiest way to do this is during your test `setUp`:
-
-```python
-# ...
-
-@patch("charm.KubernetesServicePatch", lambda x, y: None)
-def setUp(self, *unused):
- self.harness = Harness(SomeCharm)
- # ...
-```
-"""
-
-import logging
-from types import MethodType
-from typing import List, Literal
-
-from lightkube import ApiError, Client
-from lightkube.models.core_v1 import ServicePort, ServiceSpec
-from lightkube.models.meta_v1 import ObjectMeta
-from lightkube.resources.core_v1 import Service
-from lightkube.types import PatchType
-from ops.charm import CharmBase
-from ops.framework import Object
-
-logger = logging.getLogger(__name__)
-
-# The unique Charmhub library identifier, never change it
-LIBID = "0042f86d0a874435adef581806cddbbb"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 1
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 1
-
-ServiceType = Literal["ClusterIP", "LoadBalancer"]
-
-
-class KubernetesServicePatch(Object):
- """A utility for patching the Kubernetes service set up by Juju."""
-
- def __init__(
- self,
- charm: CharmBase,
- ports: List[ServicePort],
- service_name: str = None,
- service_type: ServiceType = "ClusterIP",
- additional_labels: dict = None,
- additional_selectors: dict = None,
- additional_annotations: dict = None,
- ):
- """Constructor for KubernetesServicePatch.
-
- Args:
- charm: the charm that is instantiating the library.
- ports: a list of ServicePorts
- service_name: allows setting custom name to the patched service. If none given,
- application name will be used.
- service_type: desired type of K8s service. Default value is in line with ServiceSpec's
- default value.
- additional_labels: Labels to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_selectors: Selectors to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_annotations: Annotations to be added to the kubernetes service.
- """
- super().__init__(charm, "kubernetes-service-patch")
- self.charm = charm
- self.service_name = service_name if service_name else self._app
- self.service = self._service_object(
- ports,
- service_name,
- service_type,
- additional_labels,
- additional_selectors,
- additional_annotations,
- )
-
- # Make mypy type checking happy that self._patch is a method
- assert isinstance(self._patch, MethodType)
- # Ensure this patch is applied during the 'install' and 'upgrade-charm' events
- self.framework.observe(charm.on.install, self._patch)
- self.framework.observe(charm.on.upgrade_charm, self._patch)
-
- def _service_object(
- self,
- ports: List[ServicePort],
- service_name: str = None,
- service_type: ServiceType = "ClusterIP",
- additional_labels: dict = None,
- additional_selectors: dict = None,
- additional_annotations: dict = None,
- ) -> Service:
- """Creates a valid Service representation.
-
- Args:
- ports: a list of ServicePorts
- service_name: allows setting custom name to the patched service. If none given,
- application name will be used.
- service_type: desired type of K8s service. Default value is in line with ServiceSpec's
- default value.
- additional_labels: Labels to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_selectors: Selectors to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_annotations: Annotations to be added to the kubernetes service.
-
- Returns:
- Service: A valid representation of a Kubernetes Service with the correct ports.
- """
- if not service_name:
- service_name = self._app
- labels = {"app.kubernetes.io/name": self._app}
- if additional_labels:
- labels.update(additional_labels)
- selector = {"app.kubernetes.io/name": self._app}
- if additional_selectors:
- selector.update(additional_selectors)
- return Service(
- apiVersion="v1",
- kind="Service",
- metadata=ObjectMeta(
- namespace=self._namespace,
- name=service_name,
- labels=labels,
- annotations=additional_annotations, # type: ignore[arg-type]
- ),
- spec=ServiceSpec(
- selector=selector,
- ports=ports,
- type=service_type,
- ),
- )
-
- def _patch(self, _) -> None:
- """Patch the Kubernetes service created by Juju to map the correct port.
-
- Raises:
- PatchFailed: if patching fails due to lack of permissions, or otherwise.
- """
- if not self.charm.unit.is_leader():
- return
-
- client = Client()
- try:
- if self.service_name != self._app:
- self._delete_and_create_service(client)
- client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE)
- except ApiError as e:
- if e.status.code == 403:
- logger.error("Kubernetes service patch failed: `juju trust` this application.")
- else:
- logger.error("Kubernetes service patch failed: %s", str(e))
- else:
- logger.info("Kubernetes service '%s' patched successfully", self._app)
-
- def _delete_and_create_service(self, client: Client):
- service = client.get(Service, self._app, namespace=self._namespace)
- service.metadata.name = self.service_name # type: ignore[attr-defined]
- service.metadata.resourceVersion = service.metadata.uid = None # type: ignore[attr-defined] # noqa: E501
- client.delete(Service, self._app, namespace=self._namespace)
- client.create(service)
-
- def is_patched(self) -> bool:
- """Reports if the service patch has been applied.
-
- Returns:
- bool: A boolean indicating if the service patch has been applied.
- """
- client = Client()
- # Get the relevant service from the cluster
- service = client.get(Service, name=self.service_name, namespace=self._namespace)
- # Construct a list of expected ports, should the patch be applied
- expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]
- # Construct a list in the same manner, using the fetched service
- fetched_ports = [(p.port, p.targetPort) for p in service.spec.ports] # type: ignore[attr-defined] # noqa: E501
- return expected_ports == fetched_ports
-
- @property
- def _app(self) -> str:
- """Name of the current Juju application.
-
- Returns:
- str: A string containing the name of the current Juju application.
- """
- return self.charm.app.name
-
- @property
- def _namespace(self) -> str:
- """The Kubernetes namespace we're running in.
-
- Returns:
- str: A string containing the name of the current Kubernetes namespace.
- """
- with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
- return f.read().strip()
diff --git a/installers/charm/osm-nbi/lib/charms/osm_libs/v0/utils.py b/installers/charm/osm-nbi/lib/charms/osm_libs/v0/utils.py
deleted file mode 100644
index d739ba68..00000000
--- a/installers/charm/osm-nbi/lib/charms/osm_libs/v0/utils.py
+++ /dev/null
@@ -1,544 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-"""OSM Utils Library.
-
-This library offers some utilities made for but not limited to Charmed OSM.
-
-# Getting started
-
-Execute the following command inside your Charmed Operator folder to fetch the library.
-
-```shell
-charmcraft fetch-lib charms.osm_libs.v0.utils
-```
-
-# CharmError Exception
-
-An exception that takes two arguments, the message and the StatusBase class, which are useful
-for setting the status of the charm when the exception is raised.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import CharmError
-from ops.charm import CharmBase
-from ops.model import BlockedStatus, WaitingStatus
-
-class MyCharm(CharmBase):
-    def _on_config_changed(self, _):
-        try:
-            if not self.config.get("some-option"):
-                raise CharmError("need some-option", BlockedStatus)
-
-            if not self.mysql_ready:
-                raise CharmError("waiting for mysql", WaitingStatus)
-
-            # Do stuff...
-
-        except CharmError as e:
-            self.unit.status = e.status
-```
-
-# Pebble validations
-
-The `check_container_ready` function checks that a container is ready,
-and therefore Pebble is ready.
-
-The `check_service_active` function checks that a service in a container is running.
-
-Both functions raise a CharmError if the validations fail.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import CharmError, check_container_ready, check_service_active
-from ops.charm import CharmBase
-from ops.model import Container
-
-class MyCharm(CharmBase):
-    def _on_config_changed(self, _):
-        try:
-            container: Container = self.unit.get_container("my-container")
-            check_container_ready(container)
-            check_service_active(container, "my-service")
-            # Do stuff...
-
-        except CharmError as e:
-            self.unit.status = e.status
-```
-
-# Debug-mode
-
-The debug-mode allows OSM developers to easily debug OSM modules.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import DebugMode
-
-class MyCharm(CharmBase):
- _stored = StoredState()
-
-    def __init__(self, *args):
- # ...
- container: Container = self.unit.get_container("my-container")
- hostpaths = [
- HostPath(
- config="module-hostpath",
- container_path="/usr/lib/python3/dist-packages/module"
- ),
- ]
- vscode_workspace_path = "files/vscode-workspace.json"
- self.debug_mode = DebugMode(
- self,
- self._stored,
- container,
- hostpaths,
- vscode_workspace_path,
- )
-
- def _on_update_status(self, _):
- if self.debug_mode.started:
- return
- # ...
-
- def _get_debug_mode_information(self):
- command = self.debug_mode.command
- password = self.debug_mode.password
- return command, password
-        return command, password
-```
-
-# More
-
-- Get pod IP with `get_pod_ip()` (see the sketch below)
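-
-A minimal usage sketch of `get_pod_ip()` (the address in the comment is illustrative):
-
-```python
-from charms.osm_libs.v0.utils import get_pod_ip
-
-pod_ip = get_pod_ip()  # e.g. "10.1.245.13"
-```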
-"""
-from dataclasses import dataclass
-import logging
-import secrets
-import socket
-from pathlib import Path
-from typing import List
-
-from lightkube import Client
-from lightkube.models.core_v1 import HostPathVolumeSource, Volume, VolumeMount
-from lightkube.resources.apps_v1 import StatefulSet
-from ops.charm import CharmBase
-from ops.framework import Object, StoredState
-from ops.model import (
- ActiveStatus,
- BlockedStatus,
- Container,
- MaintenanceStatus,
- StatusBase,
- WaitingStatus,
-)
-from ops.pebble import ServiceStatus
-
-# The unique Charmhub library identifier, never change it
-LIBID = "e915908eebee4cdd972d484728adf984"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 5
-
-logger = logging.getLogger(__name__)
-
-
-class CharmError(Exception):
- """Charm Error Exception."""
-
- def __init__(self, message: str, status_class: StatusBase = BlockedStatus) -> None:
- self.message = message
- self.status_class = status_class
- self.status = status_class(message)
-
-
-def check_container_ready(container: Container) -> None:
- """Check Pebble has started in the container.
-
- Args:
- container (Container): Container to be checked.
-
- Raises:
- CharmError: if container is not ready.
- """
- if not container.can_connect():
- raise CharmError("waiting for pebble to start", MaintenanceStatus)
-
-
-def check_service_active(container: Container, service_name: str) -> None:
- """Check if the service is running.
-
- Args:
- container (Container): Container to be checked.
- service_name (str): Name of the service to check.
-
- Raises:
- CharmError: if the service is not running.
- """
- if service_name not in container.get_plan().services:
- raise CharmError(f"{service_name} service not configured yet", WaitingStatus)
-
- if container.get_service(service_name).current != ServiceStatus.ACTIVE:
- raise CharmError(f"{service_name} service is not running")
-
-
-def get_pod_ip() -> str:
- """Get Kubernetes Pod IP.
-
- Returns:
- str: The IP of the Pod.
- """
- return socket.gethostbyname(socket.gethostname())
-
-
-_DEBUG_SCRIPT = r"""#!/bin/bash
-# Set up SSH, environment variables, and VSCode for remote debugging
-
-function download_code(){{
- wget https://go.microsoft.com/fwlink/?LinkID=760868 -O code.deb
-}}
-
-function setup_envs(){{
- grep "source /debug.envs" /root/.bashrc || echo "source /debug.envs" | tee -a /root/.bashrc
-}}
-function setup_ssh(){{
- apt install ssh -y
- cat /etc/ssh/sshd_config |
- grep -E '^PermitRootLogin yes$$' || (
- echo PermitRootLogin yes |
- tee -a /etc/ssh/sshd_config
- )
- service ssh stop
- sleep 3
- service ssh start
- usermod --password $(echo {} | openssl passwd -1 -stdin) root
-}}
-
-function setup_code(){{
- apt install libasound2 -y
- (dpkg -i code.deb || apt-get install -f -y || apt-get install -f -y) && echo Code installed successfully
- code --install-extension ms-python.python --user-data-dir /root
- mkdir -p /root/.vscode-server
- cp -R /root/.vscode/extensions /root/.vscode-server/extensions
-}}
-
-export DEBIAN_FRONTEND=noninteractive
-apt update && apt install wget -y
-download_code &
-setup_ssh &
-setup_envs
-wait
-setup_code &
-wait
-"""
-
-
-@dataclass
-class SubModule:
- """Represent RO Submodules."""
- sub_module_path: str
- container_path: str
-
-
-class HostPath:
- """Represents a hostpath."""
- def __init__(self, config: str, container_path: str, submodules: dict = None) -> None:
- mount_path_items = config.split("-")
- mount_path_items.reverse()
- self.mount_path = "/" + "/".join(mount_path_items)
- self.config = config
- self.sub_module_dict = {}
- if submodules:
- for submodule in submodules.keys():
- self.sub_module_dict[submodule] = SubModule(
- sub_module_path=self.mount_path + "/" + submodule + "/" + submodules[submodule].split("/")[-1],
- container_path=submodules[submodule],
- )
- else:
- self.container_path = container_path
- self.module_name = container_path.split("/")[-1]
-
-class DebugMode(Object):
- """Class to handle the debug-mode."""
-
- def __init__(
- self,
- charm: CharmBase,
- stored: StoredState,
- container: Container,
- hostpaths: List[HostPath] = [],
- vscode_workspace_path: str = "files/vscode-workspace.json",
- ) -> None:
- super().__init__(charm, "debug-mode")
-
- self.charm = charm
- self._stored = stored
- self.hostpaths = hostpaths
- self.vscode_workspace = Path(vscode_workspace_path).read_text()
- self.container = container
-
- self._stored.set_default(
- debug_mode_started=False,
- debug_mode_vscode_command=None,
- debug_mode_password=None,
- )
-
- self.framework.observe(self.charm.on.config_changed, self._on_config_changed)
- self.framework.observe(self.charm.on[container.name].pebble_ready, self._on_config_changed)
- self.framework.observe(self.charm.on.update_status, self._on_update_status)
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- if not self.charm.unit.is_leader():
- return
-
- debug_mode_enabled = self.charm.config.get("debug-mode", False)
- action = self.enable if debug_mode_enabled else self.disable
- action()
-
- def _on_update_status(self, _) -> None:
- """Handler for the update-status event."""
- if not self.charm.unit.is_leader() or not self.started:
- return
-
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- @property
- def started(self) -> bool:
- """Indicates whether the debug-mode has started or not."""
- return self._stored.debug_mode_started
-
- @property
- def command(self) -> str:
- """Command to launch vscode."""
- return self._stored.debug_mode_vscode_command
-
- @property
- def password(self) -> str:
- """SSH password."""
- return self._stored.debug_mode_password
-
- def enable(self, service_name: str = None) -> None:
- """Enable debug-mode.
-
- This function mounts hostpaths of the OSM modules (if set), and
- configures the container so it can be easily debugged. The setup
- includes the configuration of SSH, environment variables, and
- VSCode workspace and plugins.
-
- Args:
- service_name (str, optional): Pebble service name which has the desired environment
- variables. Mandatory if there is more than one Pebble service configured.
- """
- hostpaths_to_reconfigure = self._hostpaths_to_reconfigure()
- if self.started and not hostpaths_to_reconfigure:
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
- return
-
- logger.debug("enabling debug-mode")
-
-        # Mount hostpaths if set.
-        # If hostpaths need to be mounted or unmounted, the statefulset is patched
-        # and restarted, so we return immediately. After the restart there is
-        # nothing left to reconfigure, and this handler runs again to set up the
-        # debug-mode.
- if hostpaths_to_reconfigure:
- self.charm.unit.status = MaintenanceStatus("debug-mode: configuring hostpaths")
- self._configure_hostpaths(hostpaths_to_reconfigure)
- return
-
- self.charm.unit.status = MaintenanceStatus("debug-mode: starting")
- password = secrets.token_hex(8)
- self._setup_debug_mode(
- password,
- service_name,
- mounted_hostpaths=[hp for hp in self.hostpaths if self.charm.config.get(hp.config)],
- )
-
- self._stored.debug_mode_vscode_command = self._get_vscode_command(get_pod_ip())
- self._stored.debug_mode_password = password
- self._stored.debug_mode_started = True
- logger.info("debug-mode is ready")
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- def disable(self) -> None:
- """Disable debug-mode."""
- logger.debug("disabling debug-mode")
- current_status = self.charm.unit.status
- hostpaths_unmounted = self._unmount_hostpaths()
-
- if not self._stored.debug_mode_started:
- return
- self._stored.debug_mode_started = False
- self._stored.debug_mode_vscode_command = None
- self._stored.debug_mode_password = None
-
- if not hostpaths_unmounted:
- self.charm.unit.status = current_status
- self._restart()
-
- def _hostpaths_to_reconfigure(self) -> List[HostPath]:
- hostpaths_to_reconfigure: List[HostPath] = []
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
- volumes = statefulset.spec.template.spec.volumes
-
- for hostpath in self.hostpaths:
- hostpath_is_set = True if self.charm.config.get(hostpath.config) else False
- hostpath_already_configured = next(
- (True for volume in volumes if volume.name == hostpath.config), False
- )
- if hostpath_is_set != hostpath_already_configured:
- hostpaths_to_reconfigure.append(hostpath)
-
- return hostpaths_to_reconfigure
-
- def _setup_debug_mode(
- self,
- password: str,
- service_name: str = None,
- mounted_hostpaths: List[HostPath] = [],
- ) -> None:
- services = self.container.get_plan().services
- if not service_name and len(services) != 1:
- raise Exception("Cannot start debug-mode: please set the service_name")
-
- service = None
- if not service_name:
- service_name, service = services.popitem()
- if not service:
- service = services.get(service_name)
-
- logger.debug(f"getting environment variables from service {service_name}")
- environment = service.environment
- environment_file_content = "\n".join(
- [f'export {key}="{value}"' for key, value in environment.items()]
- )
- logger.debug(f"pushing environment file to {self.container.name} container")
- self.container.push("/debug.envs", environment_file_content)
-
- # Push VSCode workspace
- logger.debug(f"pushing vscode workspace to {self.container.name} container")
- self.container.push("/debug.code-workspace", self.vscode_workspace)
-
- # Execute debugging script
- logger.debug(f"pushing debug-mode setup script to {self.container.name} container")
- self.container.push("/debug.sh", _DEBUG_SCRIPT.format(password), permissions=0o777)
- logger.debug(f"executing debug-mode setup script in {self.container.name} container")
- self.container.exec(["/debug.sh"]).wait_output()
- logger.debug(f"stopping service {service_name} in {self.container.name} container")
- self.container.stop(service_name)
-
- # Add symlinks to mounted hostpaths
- for hostpath in mounted_hostpaths:
- logger.debug(f"adding symlink for {hostpath.config}")
- if len(hostpath.sub_module_dict) > 0:
- for sub_module in hostpath.sub_module_dict.keys():
- self.container.exec(["rm", "-rf", hostpath.sub_module_dict[sub_module].container_path]).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- hostpath.sub_module_dict[sub_module].sub_module_path,
- hostpath.sub_module_dict[sub_module].container_path,
- ]
- )
-
- else:
- self.container.exec(["rm", "-rf", hostpath.container_path]).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- f"{hostpath.mount_path}/{hostpath.module_name}",
- hostpath.container_path,
- ]
- )
-
- def _configure_hostpaths(self, hostpaths: List[HostPath]):
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in hostpaths:
- if self.charm.config.get(hostpath.config):
- self._add_hostpath_to_statefulset(hostpath, statefulset)
- else:
- self._delete_hostpath_from_statefulset(hostpath, statefulset)
-
- client.replace(statefulset)
-
- def _unmount_hostpaths(self) -> bool:
- client = Client()
- hostpath_unmounted = False
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in self.hostpaths:
- if self._delete_hostpath_from_statefulset(hostpath, statefulset):
- hostpath_unmounted = True
-
- if hostpath_unmounted:
- client.replace(statefulset)
-
- return hostpath_unmounted
-
- def _add_hostpath_to_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- # Add volume
- logger.debug(f"adding volume {hostpath.config} to {self.charm.app.name} statefulset")
- volume = Volume(
- hostpath.config,
- hostPath=HostPathVolumeSource(
- path=self.charm.config[hostpath.config],
- type="Directory",
- ),
- )
- statefulset.spec.template.spec.volumes.append(volume)
-
- # Add volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
-
- logger.debug(
- f"adding volumeMount {hostpath.config} to {self.container.name} container"
- )
- statefulset_container.volumeMounts.append(
- VolumeMount(mountPath=hostpath.mount_path, name=hostpath.config)
- )
-
- def _delete_hostpath_from_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- hostpath_unmounted = False
- for volume in statefulset.spec.template.spec.volumes:
-
- if hostpath.config != volume.name:
- continue
-
- # Remove volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
- for volume_mount in statefulset_container.volumeMounts:
- if volume_mount.name != hostpath.config:
- continue
-
- logger.debug(
- f"removing volumeMount {hostpath.config} from {self.container.name} container"
- )
- statefulset_container.volumeMounts.remove(volume_mount)
-
- # Remove volume
- logger.debug(
- f"removing volume {hostpath.config} from {self.charm.app.name} statefulset"
- )
- statefulset.spec.template.spec.volumes.remove(volume)
-
- hostpath_unmounted = True
- return hostpath_unmounted
-
- def _get_vscode_command(
- self,
- pod_ip: str,
- user: str = "root",
- workspace_path: str = "/debug.code-workspace",
- ) -> str:
- return f"code --remote ssh-remote+{user}@{pod_ip} {workspace_path}"
-
- def _restart(self):
- self.container.exec(["kill", "-HUP", "1"])
diff --git a/installers/charm/osm-nbi/lib/charms/osm_nbi/v0/nbi.py b/installers/charm/osm-nbi/lib/charms/osm_nbi/v0/nbi.py
deleted file mode 100644
index 130b6faa..00000000
--- a/installers/charm/osm-nbi/lib/charms/osm_nbi/v0/nbi.py
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# Learn more at: https://juju.is/docs/sdk
-
-"""Nbi library.
-
-This [library](https://juju.is/docs/sdk/libraries) implements both sides of the
-`nbi` [interface](https://juju.is/docs/sdk/relations).
-
-The *provider* side of this interface is implemented by the
-[osm-nbi Charmed Operator](https://charmhub.io/osm-nbi).
-
-Any Charmed Operator that *requires* NBI for providing its
-service should implement the *requirer* side of this interface.
-
-In a nutshell using this library to implement a Charmed Operator *requiring*
-NBI would look like
-
-```
-$ charmcraft fetch-lib charms.osm_nbi.v0.nbi
-```
-
-`metadata.yaml`:
-
-```
-requires:
- nbi:
- interface: nbi
- limit: 1
-```
-
-`src/charm.py`:
-
-```
-from charms.osm_nbi.v0.nbi import NbiRequires
-from ops.charm import CharmBase
-
-
-class MyCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- self.nbi = NbiRequires(self)
- self.framework.observe(
- self.on["nbi"].relation_changed,
- self._on_nbi_relation_changed,
- )
- self.framework.observe(
- self.on["nbi"].relation_broken,
- self._on_nbi_broken,
- )
-
-    def _on_nbi_relation_changed(self, event):
- # Get NBI host and port
- host: str = self.nbi.host
- port: int = self.nbi.port
- # host => "osm-nbi"
- # port => 9999
-
- def _on_nbi_broken(self, event):
- # Stop service
- # ...
- self.unit.status = BlockedStatus("need nbi relation")
-```
-
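-The *provider* side (implemented by the osm-nbi charm) publishes its host and
-port with `NbiProvides.set_host_info()`. A minimal sketch, assuming the
-application is named `osm-nbi` and serves on port 9999 (as in the example above):
-
-```python
-from charms.osm_nbi.v0.nbi import NbiProvides
-from ops.charm import CharmBase
-
-
-class OsmNbiCharm(CharmBase):
-
-    def __init__(self, *args):
-        super().__init__(*args)
-        self.nbi = NbiProvides(self)
-        self.framework.observe(
-            self.on["nbi"].relation_joined,
-            self._on_nbi_relation_joined,
-        )
-
-    def _on_nbi_relation_joined(self, event):
-        # Only the leader may write to the application databag.
-        if self.unit.is_leader():
-            self.nbi.set_host_info(self.app.name, 9999, event.relation)
-```
-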
-You can file bugs
-[here](https://osm.etsi.org/bugzilla/enter_bug.cgi), selecting the `devops` module!
-"""
-from typing import Optional
-
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, Object
-from ops.model import Relation
-
-
-# The unique Charmhub library identifier, never change it
-LIBID = "8c888f7c869949409e12c16d78ec068b"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 1
-
-NBI_HOST_APP_KEY = "host"
-NBI_PORT_APP_KEY = "port"
-
-
-class NbiRequires(Object): # pragma: no cover
- """Requires-side of the Nbi relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "nbi") -> None:
- super().__init__(charm, endpoint_name)
- self.charm = charm
- self._endpoint_name = endpoint_name
-
- @property
- def host(self) -> str:
- """Get nbi hostname."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- relation.data[relation.app].get(NBI_HOST_APP_KEY)
- if relation and relation.app
- else None
- )
-
- @property
- def port(self) -> int:
- """Get nbi port number."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- int(relation.data[relation.app].get(NBI_PORT_APP_KEY))
- if relation and relation.app
- else None
- )
-
-
-class NbiProvides(Object):
- """Provides-side of the Nbi relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "nbi") -> None:
- super().__init__(charm, endpoint_name)
- self._endpoint_name = endpoint_name
-
- def set_host_info(self, host: str, port: int, relation: Optional[Relation] = None) -> None:
- """Set Nbi host and port.
-
- This function writes in the application data of the relation, therefore,
- only the unit leader can call it.
-
- Args:
- host (str): Nbi hostname or IP address.
- port (int): Nbi port.
- relation (Optional[Relation]): Relation to update.
- If not specified, all relations will be updated.
-
- Raises:
- Exception: if a non-leader unit calls this function.
- """
- if not self.model.unit.is_leader():
-            raise Exception("only the leader can set host information.")
-
- if relation:
- self._update_relation_data(host, port, relation)
- return
-
- for relation in self.model.relations[self._endpoint_name]:
- self._update_relation_data(host, port, relation)
-
- def _update_relation_data(self, host: str, port: int, relation: Relation) -> None:
- """Update data in relation if needed."""
- relation.data[self.model.app][NBI_HOST_APP_KEY] = host
- relation.data[self.model.app][NBI_PORT_APP_KEY] = str(port)
diff --git a/installers/charm/osm-nbi/metadata.yaml b/installers/charm/osm-nbi/metadata.yaml
deleted file mode 100644
index 8a336c8e..00000000
--- a/installers/charm/osm-nbi/metadata.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Overview on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-name: osm-nbi
-
-# The following metadata are human-readable and will be published prominently on Charmhub.
-
-display-name: OSM NBI
-
-summary: OSM Northbound Interface (NBI)
-
-description: |
- A Kubernetes operator that deploys the Northbound Interface of OSM.
-
-  OSM provides a unified northbound interface (NBI), based on NFV SOL005, which enables
-  the full operation of the system and of the Network Services and Network Slices under
-  its control.
-
-  OSM's NBI offers the service of managing the lifecycle of Network Services (NS)
-  and Network Slice Instances (NSI), providing as a service all the necessary abstractions
-  to allow the complete control, operation and supervision of the NS/NSI lifecycle by
-  client systems, avoiding the exposure of unnecessary details of its constituent elements.
-
- This charm doesn't make sense on its own.
- See more:
- - https://charmhub.io/osm
-
-containers:
- nbi:
- resource: nbi-image
-
-# This file populates the Resources tab on Charmhub.
-
-resources:
- nbi-image:
- type: oci-image
- description: OCI image for nbi
- upstream-source: opensourcemano/nbi
-
-requires:
- kafka:
- interface: kafka
- limit: 1
- mongodb:
- interface: mongodb_client
- limit: 1
- keystone:
- interface: keystone
- limit: 1
- prometheus:
- interface: prometheus
- limit: 1
- ingress:
- interface: ingress
- limit: 1
-
-provides:
- nbi:
- interface: nbi
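-
-# The relations above are typically established after deployment, for example
-# (the application names on the right-hand side are illustrative assumptions,
-# not requirements of this charm):
-#
-#   juju relate osm-nbi:kafka kafka-k8s
-#   juju relate osm-nbi:mongodb mongodb-k8s
-#   juju relate osm-nbi:keystone osm-keystone
-#   juju relate osm-nbi:prometheus osm-prometheus
-#   juju relate osm-nbi:ingress nginx-ingress-integrator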
diff --git a/installers/charm/osm-nbi/pyproject.toml b/installers/charm/osm-nbi/pyproject.toml
deleted file mode 100644
index 16cf0f4b..00000000
--- a/installers/charm/osm-nbi/pyproject.toml
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-# Testing tools configuration
-[tool.coverage.run]
-branch = true
-
-[tool.coverage.report]
-show_missing = true
-
-[tool.pytest.ini_options]
-minversion = "6.0"
-log_cli_level = "INFO"
-
-# Formatting tools configuration
-[tool.black]
-line-length = 99
-target-version = ["py38"]
-
-[tool.isort]
-profile = "black"
-
-# Linting tools configuration
-[tool.flake8]
-max-line-length = 99
-max-doc-length = 99
-max-complexity = 10
-exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
-select = ["E", "W", "F", "C", "N", "R", "D", "H"]
-# Ignore W503, E501 because using black creates errors with this
-# Ignore D107 Missing docstring in __init__
-ignore = ["W503", "E501", "D107"]
-# D100, D101, D102, D103: Ignore missing docstrings in tests
-per-file-ignores = ["tests/*:D100,D101,D102,D103,D104"]
-docstring-convention = "google"
diff --git a/installers/charm/osm-nbi/requirements.txt b/installers/charm/osm-nbi/requirements.txt
deleted file mode 100644
index 761edd85..00000000
--- a/installers/charm/osm-nbi/requirements.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-ops < 2.2
-lightkube
-lightkube-models
-git+https://github.com/charmed-osm/config-validator/
diff --git a/installers/charm/osm-nbi/src/charm.py b/installers/charm/osm-nbi/src/charm.py
deleted file mode 100755
index b19beae8..00000000
--- a/installers/charm/osm-nbi/src/charm.py
+++ /dev/null
@@ -1,314 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# Learn more at: https://juju.is/docs/sdk
-
-"""OSM NBI charm.
-
-See more: https://charmhub.io/osm
-"""
-
-import logging
-from typing import Any, Dict
-
-from charms.data_platform_libs.v0.data_interfaces import DatabaseRequires
-from charms.kafka_k8s.v0.kafka import KafkaEvents, KafkaRequires
-from charms.nginx_ingress_integrator.v0.ingress import IngressRequires
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from charms.osm_libs.v0.utils import (
- CharmError,
- DebugMode,
- HostPath,
- check_container_ready,
- check_service_active,
-)
-from charms.osm_nbi.v0.nbi import NbiProvides
-from lightkube.models.core_v1 import ServicePort
-from ops.charm import ActionEvent, CharmBase, RelationJoinedEvent
-from ops.framework import StoredState
-from ops.main import main
-from ops.model import ActiveStatus, Container
-
-from legacy_interfaces import KeystoneClient, PrometheusClient
-
-HOSTPATHS = [
- HostPath(
- config="nbi-hostpath",
- container_path="/usr/lib/python3/dist-packages/osm_nbi",
- ),
- HostPath(
- config="common-hostpath",
- container_path="/usr/lib/python3/dist-packages/osm_common",
- ),
-]
-SERVICE_PORT = 9999
-
-logger = logging.getLogger(__name__)
-
-
-class OsmNbiCharm(CharmBase):
- """OSM NBI Kubernetes sidecar charm."""
-
- on = KafkaEvents()
- _stored = StoredState()
-
- def __init__(self, *args):
- super().__init__(*args)
- self.ingress = IngressRequires(
- self,
- {
- "service-hostname": self.external_hostname,
- "service-name": self.app.name,
- "service-port": SERVICE_PORT,
- },
- )
- self.kafka = KafkaRequires(self)
- self.nbi = NbiProvides(self)
- self.mongodb_client = DatabaseRequires(
- self, "mongodb", database_name="osm", extra_user_roles="admin"
- )
- self.prometheus_client = PrometheusClient(self, "prometheus")
- self.keystone_client = KeystoneClient(self, "keystone")
- self._observe_charm_events()
- self.container: Container = self.unit.get_container("nbi")
- self.debug_mode = DebugMode(self, self._stored, self.container, HOSTPATHS)
- self._patch_k8s_service()
-
- @property
- def external_hostname(self) -> str:
- """External hostname property.
-
- Returns:
- str: the external hostname from config.
- If not set, return the ClusterIP service name.
- """
- return self.config.get("external-hostname") or self.app.name
-
- # ---------------------------------------------------------------------------
- # Handlers for Charm Events
- # ---------------------------------------------------------------------------
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- try:
- self._validate_config()
- self._check_relations()
- # Check if the container is ready.
- # Eventually it will become ready after the first pebble-ready event.
- check_container_ready(self.container)
-
- if not self.debug_mode.started:
- self._configure_service(self.container)
- self._update_ingress_config()
- self._update_nbi_relation()
- # Update charm status
- self._on_update_status()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _on_update_status(self, _=None) -> None:
- """Handler for the update-status event."""
- try:
- self._check_relations()
- if self.debug_mode.started:
- return
- check_container_ready(self.container)
- check_service_active(self.container, "nbi")
- self.unit.status = ActiveStatus()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _on_required_relation_broken(self, _) -> None:
-        """Handler for the relation-broken event of any required relation."""
- # Check Pebble has started in the container
- try:
- check_container_ready(self.container)
- check_service_active(self.container, "nbi")
- self.container.stop("nbi")
- except CharmError:
- pass
- finally:
- self._on_update_status()
-
- def _update_nbi_relation(self, event: RelationJoinedEvent = None) -> None:
- """Handler for the nbi-relation-joined event."""
- if self.unit.is_leader():
- self.nbi.set_host_info(self.app.name, SERVICE_PORT, event.relation if event else None)
-
- def _on_get_debug_mode_information_action(self, event: ActionEvent) -> None:
- """Handler for the get-debug-mode-information action event."""
- if not self.debug_mode.started:
- event.fail("debug-mode has not started. Hint: juju config nbi debug-mode=true")
- return
-
- debug_info = {"command": self.debug_mode.command, "password": self.debug_mode.password}
- event.set_results(debug_info)
-
- # ---------------------------------------------------------------------------
- # Validation and configuration and more
- # ---------------------------------------------------------------------------
-
- def _patch_k8s_service(self) -> None:
- port = ServicePort(SERVICE_PORT, name=f"{self.app.name}")
- self.service_patcher = KubernetesServicePatch(self, [port])
-
- def _observe_charm_events(self) -> None:
- event_handler_mapping = {
- # Core lifecycle events
- self.on.nbi_pebble_ready: self._on_config_changed,
- self.on.config_changed: self._on_config_changed,
- self.on.update_status: self._on_update_status,
- # Relation events
- self.on.kafka_available: self._on_config_changed,
- self.on["kafka"].relation_broken: self._on_required_relation_broken,
- self.mongodb_client.on.database_created: self._on_config_changed,
- self.on["mongodb"].relation_broken: self._on_required_relation_broken,
- # Action events
- self.on.get_debug_mode_information_action: self._on_get_debug_mode_information_action,
- self.on.nbi_relation_joined: self._update_nbi_relation,
- }
- for relation in [self.on[rel_name] for rel_name in ["prometheus", "keystone"]]:
- event_handler_mapping[relation.relation_changed] = self._on_config_changed
- event_handler_mapping[relation.relation_broken] = self._on_required_relation_broken
-
- for event, handler in event_handler_mapping.items():
- self.framework.observe(event, handler)
-
- def _is_database_available(self) -> bool:
- try:
- return self.mongodb_client.is_resource_created()
- except KeyError:
- return False
-
- def _validate_config(self) -> None:
- """Validate charm configuration.
-
- Raises:
- CharmError: if charm configuration is invalid.
- """
- logger.debug("validating charm config")
-
- def _check_relations(self) -> None:
- """Validate charm relations.
-
- Raises:
- CharmError: if charm configuration is invalid.
- """
- logger.debug("check for missing relations")
- missing_relations = []
-
- if not self.kafka.host or not self.kafka.port:
- missing_relations.append("kafka")
- if not self._is_database_available():
- missing_relations.append("mongodb")
- if self.prometheus_client.is_missing_data_in_app():
- missing_relations.append("prometheus")
- if self.keystone_client.is_missing_data_in_app():
- missing_relations.append("keystone")
-
- if missing_relations:
- relations_str = ", ".join(missing_relations)
- one_relation_missing = len(missing_relations) == 1
- error_msg = f'need {relations_str} relation{"" if one_relation_missing else "s"}'
- logger.warning(error_msg)
- raise CharmError(error_msg)
-
- def _update_ingress_config(self) -> None:
- """Update ingress config in relation."""
- ingress_config = {
- "service-hostname": self.external_hostname,
- "max-body-size": self.config["max-body-size"],
- }
- if "tls-secret-name" in self.config:
- ingress_config["tls-secret-name"] = self.config["tls-secret-name"]
- logger.debug(f"updating ingress-config: {ingress_config}")
- self.ingress.update_config(ingress_config)
-
- def _configure_service(self, container: Container) -> None:
- """Add Pebble layer with the nbi service."""
- logger.debug(f"configuring {self.app.name} service")
- container.add_layer("nbi", self._get_layer(), combine=True)
- container.replan()
-
- def _get_layer(self) -> Dict[str, Any]:
- """Get layer for Pebble."""
- return {
- "summary": "nbi layer",
- "description": "pebble config layer for nbi",
- "services": {
- "nbi": {
- "override": "replace",
- "summary": "nbi service",
- "command": "/bin/sh -c 'cd /app/osm_nbi && python3 -m osm_nbi.nbi'", # cd /app/osm_nbi is needed until we upgrade Juju to 3.x
- "startup": "enabled",
- "user": "appuser",
- "group": "appuser",
- "working-dir": "/app/osm_nbi", # This parameter has no effect in juju 2.9.x
- "environment": {
- # General configuration
- "OSMNBI_SERVER_ENABLE_TEST": False,
- "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
- # Kafka configuration
- "OSMNBI_MESSAGE_HOST": self.kafka.host,
- "OSMNBI_MESSAGE_PORT": self.kafka.port,
- "OSMNBI_MESSAGE_DRIVER": "kafka",
- # Database configuration
- "OSMNBI_DATABASE_DRIVER": "mongo",
- "OSMNBI_DATABASE_URI": self._get_mongodb_uri(),
- "OSMNBI_DATABASE_COMMONKEY": self.config["database-commonkey"],
- # Storage configuration
- "OSMNBI_STORAGE_DRIVER": "mongo",
- "OSMNBI_STORAGE_PATH": "/app/storage",
- "OSMNBI_STORAGE_COLLECTION": "files",
- "OSMNBI_STORAGE_URI": self._get_mongodb_uri(),
- # Prometheus configuration
- "OSMNBI_PROMETHEUS_HOST": self.prometheus_client.hostname,
- "OSMNBI_PROMETHEUS_PORT": self.prometheus_client.port,
- # Log configuration
- "OSMNBI_LOG_LEVEL": self.config["log-level"],
- # Authentication environments
- "OSMNBI_AUTHENTICATION_BACKEND": "keystone",
- "OSMNBI_AUTHENTICATION_AUTH_URL": self.keystone_client.host,
- "OSMNBI_AUTHENTICATION_AUTH_PORT": self.keystone_client.port,
- "OSMNBI_AUTHENTICATION_USER_DOMAIN_NAME": self.keystone_client.user_domain_name,
- "OSMNBI_AUTHENTICATION_PROJECT_DOMAIN_NAME": self.keystone_client.project_domain_name,
- "OSMNBI_AUTHENTICATION_SERVICE_USERNAME": self.keystone_client.username,
- "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD": self.keystone_client.password,
- "OSMNBI_AUTHENTICATION_SERVICE_PROJECT": self.keystone_client.service,
- # DISABLING INTERNAL SSL SERVER
- "OSMNBI_SERVER_SSL_MODULE": "",
- "OSMNBI_SERVER_SSL_CERTIFICATE": "",
- "OSMNBI_SERVER_SSL_PRIVATE_KEY": "",
- "OSMNBI_SERVER_SSL_PASS_PHRASE": "",
- },
- }
- },
- }
-
- def _get_mongodb_uri(self):
- return list(self.mongodb_client.fetch_relation_data().values())[0]["uris"]
-
-
-if __name__ == "__main__": # pragma: no cover
- main(OsmNbiCharm)
diff --git a/installers/charm/osm-nbi/src/legacy_interfaces.py b/installers/charm/osm-nbi/src/legacy_interfaces.py
deleted file mode 100644
index 5deb3f5f..00000000
--- a/installers/charm/osm-nbi/src/legacy_interfaces.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# flake8: noqa
-
-import ops
-
-
-class BaseRelationClient(ops.framework.Object):
- """Requires side of a Kafka Endpoint"""
-
- def __init__(
- self,
- charm: ops.charm.CharmBase,
- relation_name: str,
- mandatory_fields: list = [],
- ):
- super().__init__(charm, relation_name)
- self.relation_name = relation_name
- self.mandatory_fields = mandatory_fields
- self._update_relation()
-
- def get_data_from_unit(self, key: str):
- if not self.relation:
- # Refresh the cached relation. In production this is normally redundant because
- # the constructor runs on every hook, but in unit tests update_relation_data does
- # not re-instantiate the charm, so the cached relation data can be missing or stale.
- self._update_relation()
- if self.relation:
- for unit in self.relation.units:
- data = self.relation.data[unit].get(key)
- if data:
- return data
-
- def get_data_from_app(self, key: str):
- if not self.relation or self.relation.app not in self.relation.data:
- # Refresh the cached relation. In production this is normally redundant because
- # the constructor runs on every hook, but in unit tests update_relation_data does
- # not re-instantiate the charm, so the cached relation data can be missing or stale.
- self._update_relation()
- if self.relation and self.relation.app in self.relation.data:
- data = self.relation.data[self.relation.app].get(key)
- if data:
- return data
-
- def is_missing_data_in_unit(self):
- return not all([self.get_data_from_unit(field) for field in self.mandatory_fields])
-
- def is_missing_data_in_app(self):
- return not all([self.get_data_from_app(field) for field in self.mandatory_fields])
-
- def _update_relation(self):
- self.relation = self.framework.model.get_relation(self.relation_name)
-
-
-class KeystoneClient(BaseRelationClient):
- """Requires side of a Keystone Endpoint"""
-
- mandatory_fields = [
- "host",
- "port",
- "user_domain_name",
- "project_domain_name",
- "username",
- "password",
- "service",
- "keystone_db_password",
- "region_id",
- "admin_username",
- "admin_password",
- "admin_project_name",
- ]
-
- def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
- super().__init__(charm, relation_name, self.mandatory_fields)
-
- @property
- def host(self):
- return self.get_data_from_app("host")
-
- @property
- def port(self):
- return self.get_data_from_app("port")
-
- @property
- def user_domain_name(self):
- return self.get_data_from_app("user_domain_name")
-
- @property
- def project_domain_name(self):
- return self.get_data_from_app("project_domain_name")
-
- @property
- def username(self):
- return self.get_data_from_app("username")
-
- @property
- def password(self):
- return self.get_data_from_app("password")
-
- @property
- def service(self):
- return self.get_data_from_app("service")
-
- @property
- def keystone_db_password(self):
- return self.get_data_from_app("keystone_db_password")
-
- @property
- def region_id(self):
- return self.get_data_from_app("region_id")
-
- @property
- def admin_username(self):
- return self.get_data_from_app("admin_username")
-
- @property
- def admin_password(self):
- return self.get_data_from_app("admin_password")
-
- @property
- def admin_project_name(self):
- return self.get_data_from_app("admin_project_name")
-
-
-class MongoClient(BaseRelationClient):
- """Requires side of a Mongo Endpoint"""
-
- mandatory_fields_mapping = {
- "reactive": ["connection_string"],
- "ops": ["replica_set_uri", "replica_set_name"],
- }
-
- def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
- super().__init__(charm, relation_name, mandatory_fields=[])
-
- @property
- def connection_string(self):
- if self.is_opts():
- replica_set_uri = self.get_data_from_unit("replica_set_uri")
- replica_set_name = self.get_data_from_unit("replica_set_name")
- return f"{replica_set_uri}?replicaSet={replica_set_name}"
- else:
- return self.get_data_from_unit("connection_string")
-
- def is_opts(self):
- return not self.is_missing_data_in_unit_ops()
-
- def is_missing_data_in_unit(self):
- return self.is_missing_data_in_unit_ops() and self.is_missing_data_in_unit_reactive()
-
- def is_missing_data_in_unit_ops(self):
- return not all(
- [self.get_data_from_unit(field) for field in self.mandatory_fields_mapping["ops"]]
- )
-
- def is_missing_data_in_unit_reactive(self):
- return not all(
- [self.get_data_from_unit(field) for field in self.mandatory_fields_mapping["reactive"]]
- )
-
-
-class PrometheusClient(BaseRelationClient):
- """Requires side of a Prometheus Endpoint"""
-
- mandatory_fields = ["hostname", "port"]
-
- def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
- super().__init__(charm, relation_name, self.mandatory_fields)
-
- @property
- def hostname(self):
- return self.get_data_from_app("hostname")
-
- @property
- def port(self):
- return self.get_data_from_app("port")
-
- @property
- def user(self):
- return self.get_data_from_app("user")
-
- @property
- def password(self):
- return self.get_data_from_app("password")
diff --git a/installers/charm/osm-nbi/tests/integration/test_charm.py b/installers/charm/osm-nbi/tests/integration/test_charm.py
deleted file mode 100644
index 85551758..00000000
--- a/installers/charm/osm-nbi/tests/integration/test_charm.py
+++ /dev/null
@@ -1,203 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# Learn more about testing at: https://juju.is/docs/sdk/testing
-
-import asyncio
-import logging
-import shlex
-from pathlib import Path
-
-import pytest
-import yaml
-from pytest_operator.plugin import OpsTest
-
-logger = logging.getLogger(__name__)
-
-METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
-NBI_APP = METADATA["name"]
-KAFKA_CHARM = "kafka-k8s"
-KAFKA_APP = "kafka"
-MARIADB_CHARM = "charmed-osm-mariadb-k8s"
-MARIADB_APP = "mariadb"
-MONGO_DB_CHARM = "mongodb-k8s"
-MONGO_DB_APP = "mongodb"
-KEYSTONE_CHARM = "osm-keystone"
-KEYSTONE_APP = "keystone"
-PROMETHEUS_CHARM = "osm-prometheus"
-PROMETHEUS_APP = "prometheus"
-ZOOKEEPER_CHARM = "zookeeper-k8s"
-ZOOKEEPER_APP = "zookeeper"
-INGRESS_CHARM = "nginx-ingress-integrator"
-INGRESS_APP = "ingress"
-APPS = [KAFKA_APP, MONGO_DB_APP, MARIADB_APP, ZOOKEEPER_APP, KEYSTONE_APP, PROMETHEUS_APP, NBI_APP]
-
-
-@pytest.mark.abort_on_fail
-async def test_nbi_is_deployed(ops_test: OpsTest):
- charm = await ops_test.build_charm(".")
- resources = {"nbi-image": METADATA["resources"]["nbi-image"]["upstream-source"]}
-
- await asyncio.gather(
- ops_test.model.deploy(
- charm, resources=resources, application_name=NBI_APP, series="jammy"
- ),
- ops_test.model.deploy(KAFKA_CHARM, application_name=KAFKA_APP, channel="stable"),
- ops_test.model.deploy(MONGO_DB_CHARM, application_name=MONGO_DB_APP, channel="5/edge"),
- ops_test.model.deploy(MARIADB_CHARM, application_name=MARIADB_APP, channel="stable"),
- ops_test.model.deploy(ZOOKEEPER_CHARM, application_name=ZOOKEEPER_APP, channel="stable"),
- ops_test.model.deploy(PROMETHEUS_CHARM, application_name=PROMETHEUS_APP, channel="stable"),
- )
- # The Keystone charm has to be deployed differently because
- # bug https://github.com/juju/python-libjuju/issues/766
- # prevents the resources from being set correctly
- keystone_image = "opensourcemano/keystone:testing-daily"
- cmd = f"juju deploy {KEYSTONE_CHARM} {KEYSTONE_APP} --resource keystone-image={keystone_image} --channel=latest/beta --series jammy"
- await ops_test.run(*shlex.split(cmd), check=True)
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- )
- assert ops_test.model.applications[NBI_APP].status == "blocked"
- unit = ops_test.model.applications[NBI_APP].units[0]
- assert unit.workload_status_message == "need kafka, mongodb, prometheus, keystone relations"
-
- logger.info("Adding relations for other components")
- await ops_test.model.add_relation(KAFKA_APP, ZOOKEEPER_APP)
- await ops_test.model.add_relation(MARIADB_APP, KEYSTONE_APP)
-
- logger.info("Adding relations for NBI")
- await ops_test.model.add_relation(
- "{}:mongodb".format(NBI_APP), "{}:database".format(MONGO_DB_APP)
- )
- await ops_test.model.add_relation(NBI_APP, KAFKA_APP)
- await ops_test.model.add_relation(NBI_APP, PROMETHEUS_APP)
- await ops_test.model.add_relation(NBI_APP, KEYSTONE_APP)
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- )
-
-
-@pytest.mark.abort_on_fail
-async def test_nbi_scales_up(ops_test: OpsTest):
- logger.info("Scaling up osm-nbi")
- expected_units = 3
- assert len(ops_test.model.applications[NBI_APP].units) == 1
- await ops_test.model.applications[NBI_APP].scale(expected_units)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=[NBI_APP], status="active", wait_for_exact_units=expected_units
- )
-
-
-@pytest.mark.abort_on_fail
-@pytest.mark.parametrize(
- "relation_to_remove", [KAFKA_APP, MONGO_DB_APP, PROMETHEUS_APP, KEYSTONE_APP]
-)
-async def test_nbi_blocks_without_relation(ops_test: OpsTest, relation_to_remove):
- logger.info("Removing relation: %s", relation_to_remove)
- # mongoDB relation is named "database"
- local_relation = relation_to_remove
- if local_relation == MONGO_DB_APP:
- local_relation = "database"
- await asyncio.gather(
- ops_test.model.applications[relation_to_remove].remove_relation(local_relation, NBI_APP)
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[NBI_APP])
- assert ops_test.model.applications[NBI_APP].status == "blocked"
- for unit in ops_test.model.applications[NBI_APP].units:
- assert unit.workload_status_message == f"need {relation_to_remove} relation"
- await ops_test.model.add_relation(NBI_APP, relation_to_remove)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- )
-
-
-@pytest.mark.abort_on_fail
-async def test_nbi_action_debug_mode_disabled(ops_test: OpsTest):
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- )
- logger.info("Running action 'get-debug-mode-information'")
- action = (
- await ops_test.model.applications[NBI_APP]
- .units[0]
- .run_action("get-debug-mode-information")
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[NBI_APP])
- status = await ops_test.model.get_action_status(uuid_or_prefix=action.entity_id)
- assert status[action.entity_id] == "failed"
-
-
-@pytest.mark.abort_on_fail
-async def test_nbi_action_debug_mode_enabled(ops_test: OpsTest):
- await ops_test.model.applications[NBI_APP].set_config({"debug-mode": "true"})
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- )
- logger.info("Running action 'get-debug-mode-information'")
- # list of units is not ordered
- unit_id = list(
- filter(
- lambda x: (x.entity_id == f"{NBI_APP}/0"), ops_test.model.applications[NBI_APP].units
- )
- )[0]
- action = await unit_id.run_action("get-debug-mode-information")
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[NBI_APP])
- status = await ops_test.model.get_action_status(uuid_or_prefix=action.entity_id)
- message = await ops_test.model.get_action_output(action_uuid=action.entity_id)
- assert status[action.entity_id] == "completed"
- assert "command" in message
- assert "password" in message
-
-
-@pytest.mark.abort_on_fail
-async def test_nbi_integration_ingress(ops_test: OpsTest):
- # Temporary workaround for a python-libjuju 2.9.42.2 bug fixed in
- # https://github.com/juju/python-libjuju/pull/854
- # To be replaced once juju 2.9.43 is used.
- cmd = f"juju deploy {INGRESS_CHARM} {INGRESS_APP} --channel stable"
- await ops_test.run(*shlex.split(cmd), check=True)
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS + [INGRESS_APP],
- )
-
- await ops_test.model.add_relation(NBI_APP, INGRESS_APP)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS + [INGRESS_APP],
- status="active",
- )
diff --git a/installers/charm/osm-nbi/tests/unit/test_charm.py b/installers/charm/osm-nbi/tests/unit/test_charm.py
deleted file mode 100644
index b1604192..00000000
--- a/installers/charm/osm-nbi/tests/unit/test_charm.py
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# Learn more about testing at: https://juju.is/docs/sdk/testing
-
-import pytest
-from ops.model import ActiveStatus, BlockedStatus
-from ops.testing import Harness
-from pytest_mock import MockerFixture
-
-from charm import CharmError, OsmNbiCharm, check_service_active
-
-container_name = "nbi"
-service_name = "nbi"
-
-
-@pytest.fixture
-def harness(mocker: MockerFixture):
- mocker.patch("charm.KubernetesServicePatch", lambda x, y: None)
- harness = Harness(OsmNbiCharm)
- harness.begin()
- harness.container_pebble_ready(container_name)
- yield harness
- harness.cleanup()
-
-
-def test_missing_relations(harness: Harness):
- harness.charm.on.config_changed.emit()
- assert type(harness.charm.unit.status) == BlockedStatus
- assert all(
- relation in harness.charm.unit.status.message
- for relation in ["mongodb", "kafka", "prometheus", "keystone"]
- )
-
-
-def test_ready(harness: Harness):
- _add_relations(harness)
- assert harness.charm.unit.status == ActiveStatus()
-
-
-def test_container_stops_after_relation_broken(harness: Harness):
- harness.charm.on[container_name].pebble_ready.emit(container_name)
- container = harness.charm.unit.get_container(container_name)
- relation_ids = _add_relations(harness)
- check_service_active(container, service_name)
- harness.remove_relation(relation_ids[0])
- with pytest.raises(CharmError):
- check_service_active(container, service_name)
-
-
-def test_nbi_relation_joined(harness: Harness):
- harness.set_leader(True)
- _add_relations(harness)
- relation_id = harness.add_relation("nbi", "ng-ui")
- harness.add_relation_unit(relation_id, "ng-ui/0")
- relation_data = harness.get_relation_data(relation_id, harness.charm.app.name)
- assert harness.charm.unit.status == ActiveStatus()
- assert relation_data == {"host": harness.charm.app.name, "port": "9999"}
-
-
-def _add_relations(harness: Harness):
- relation_ids = []
- # Add mongo relation
- relation_id = harness.add_relation("mongodb", "mongodb")
- harness.add_relation_unit(relation_id, "mongodb/0")
- harness.update_relation_data(
- relation_id,
- "mongodb",
- {"uris": "mongodb://:1234", "username": "user", "password": "password"},
- )
- relation_ids.append(relation_id)
- # Add kafka relation
- relation_id = harness.add_relation("kafka", "kafka")
- harness.add_relation_unit(relation_id, "kafka/0")
- harness.update_relation_data(relation_id, "kafka", {"host": "kafka", "port": "9092"})
- relation_ids.append(relation_id)
- # Add prometheus relation
- relation_id = harness.add_relation("prometheus", "prometheus")
- harness.add_relation_unit(relation_id, "prometheus/0")
- harness.update_relation_data(
- relation_id, "prometheus", {"hostname": "prometheus", "port": "9090"}
- )
- relation_ids.append(relation_id)
- # Add keystone relation
- relation_id = harness.add_relation("keystone", "keystone")
- harness.add_relation_unit(relation_id, "keystone/0")
- harness.update_relation_data(
- relation_id,
- "keystone",
- {
- "host": "host",
- "port": "port",
- "user_domain_name": "user_domain_name",
- "project_domain_name": "project_domain_name",
- "username": "username",
- "password": "password",
- "service": "service",
- "keystone_db_password": "keystone_db_password",
- "region_id": "region_id",
- "admin_username": "admin_username",
- "admin_password": "admin_password",
- "admin_project_name": "admin_project_name",
- },
- )
- relation_ids.append(relation_id)
- return relation_ids
diff --git a/installers/charm/osm-nbi/tox.ini b/installers/charm/osm-nbi/tox.ini
deleted file mode 100644
index 07ea16dc..00000000
--- a/installers/charm/osm-nbi/tox.ini
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-[tox]
-skipsdist=True
-skip_missing_interpreters = True
-envlist = lint, unit, integration
-
-[vars]
-src_path = {toxinidir}/src/
-tst_path = {toxinidir}/tests/
-lib_path = {toxinidir}/lib/charms/osm_nbi
-all_path = {[vars]src_path} {[vars]tst_path}
-
-[testenv]
-basepython = python3.8
-setenv =
- PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path}
- PYTHONBREAKPOINT=ipdb.set_trace
- PY_COLORS=1
-passenv =
- PYTHONPATH
- CHARM_BUILD_DIR
- MODEL_SETTINGS
-
-[testenv:fmt]
-description = Apply coding style standards to code
-deps =
- black
- isort
-commands =
- isort {[vars]all_path}
- black {[vars]all_path}
-
-[testenv:lint]
-description = Check code against coding style standards
-deps =
- black
- flake8
- flake8-docstrings
- flake8-builtins
- pyproject-flake8
- pep8-naming
- isort
- codespell
-commands =
- # uncomment the following line if this charm owns a lib
- codespell {[vars]lib_path}
- codespell {toxinidir} --skip {toxinidir}/.git --skip {toxinidir}/.tox \
- --skip {toxinidir}/build --skip {toxinidir}/lib --skip {toxinidir}/venv \
- --skip {toxinidir}/.mypy_cache --skip {toxinidir}/icon.svg
- # pflake8 wrapper supports config from pyproject.toml
- pflake8 {[vars]all_path}
- isort --check-only --diff {[vars]all_path}
- black --check --diff {[vars]all_path}
-
-[testenv:unit]
-description = Run unit tests
-deps =
- pytest
- pytest-mock
- coverage[toml]
- -r{toxinidir}/requirements.txt
-commands =
- coverage run --source={[vars]src_path},{[vars]lib_path} \
- -m pytest --ignore={[vars]tst_path}integration -v --tb native -s {posargs}
- coverage report
- coverage xml
-
-[testenv:integration]
-description = Run integration tests
-deps =
- pytest
- juju<3
- pytest-operator
- -r{toxinidir}/requirements.txt
-commands =
- pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} --cloud microk8s
diff --git a/installers/charm/osm-ng-ui/.gitignore b/installers/charm/osm-ng-ui/.gitignore
deleted file mode 100644
index 87d0a587..00000000
--- a/installers/charm/osm-ng-ui/.gitignore
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-venv/
-build/
-*.charm
-.tox/
-.coverage
-coverage.xml
-__pycache__/
-*.py[cod]
-.vscode
\ No newline at end of file
diff --git a/installers/charm/osm-ng-ui/.jujuignore b/installers/charm/osm-ng-ui/.jujuignore
deleted file mode 100644
index 17c7a8bb..00000000
--- a/installers/charm/osm-ng-ui/.jujuignore
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-/venv
-*.py[cod]
-*.charm
diff --git a/installers/charm/osm-ng-ui/CONTRIBUTING.md b/installers/charm/osm-ng-ui/CONTRIBUTING.md
deleted file mode 100644
index 8a91a44c..00000000
--- a/installers/charm/osm-ng-ui/CONTRIBUTING.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-# Contributing
-
-## Overview
-
-This document explains the processes and practices recommended for contributing enhancements to
-this operator.
-
-- Generally, before developing enhancements to this charm, you should consider [opening an issue
- ](https://osm.etsi.org/bugzilla/enter_bug.cgi?product=OSM) explaining your use case. (Component=devops, version=master)
-- If you would like to chat with us about your use-cases or proposed implementation, you can reach
- us at [OSM Juju public channel](https://opensourcemano.slack.com/archives/C027KJGPECA).
-- Familiarising yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) library
- will help you a lot when working on new features or bug fixes.
-- All enhancements require review before being merged. Code review typically examines
- - code quality
- - test coverage
- - user experience for Juju administrators of this charm.
-- Please help us keep branches easy to review by rebasing your gerrit patch onto
- the `master` branch.
-
-## Developing
-
-You can use the environments created by `tox` for development:
-
-```shell
-tox --notest -e unit
-source .tox/unit/bin/activate
-```
-
-### Testing
-
-```shell
-tox -e fmt # update your code according to linting rules
-tox -e lint # code style
-tox -e unit # unit tests
-tox -e integration # integration tests
-tox # runs 'lint' and 'unit' environments
-```
-
-## Build charm
-
-Build the charm in this git repository using:
-
-```shell
-charmcraft pack
-```
-
-### Deploy
-
-```bash
-# Create a model
-juju add-model dev
-# Enable DEBUG logging
-juju model-config logging-config="=INFO;unit=DEBUG"
-# Deploy the charm
-juju deploy ./osm-ng-ui_ubuntu-22.04-amd64.charm \
- --resource ng-ui-image=opensourcemano/ng-ui:testing-daily --series jammy
-```
diff --git a/installers/charm/osm-ng-ui/LICENSE b/installers/charm/osm-ng-ui/LICENSE
deleted file mode 100644
index 7e9d5046..00000000
--- a/installers/charm/osm-ng-ui/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2022 Canonical Ltd.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/installers/charm/osm-ng-ui/README.md b/installers/charm/osm-ng-ui/README.md
deleted file mode 100644
index 20a6f767..00000000
--- a/installers/charm/osm-ng-ui/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-
-
-# OSM NG-UI
-
-Charmhub package name: osm-ng-ui
-More information: https://charmhub.io/osm-ng-ui
-
-## Other resources
-
-* [Read more](https://osm.etsi.org/docs/user-guide/latest/)
-
-* [Contributing](https://osm.etsi.org/gitweb/?p=osm/devops.git;a=blob;f=installers/charm/osm-ng-ui/CONTRIBUTING.md)
-
-* See the [Juju SDK documentation](https://juju.is/docs/sdk) for more information about developing and improving charms.
-
diff --git a/installers/charm/osm-ng-ui/actions.yaml b/installers/charm/osm-ng-ui/actions.yaml
deleted file mode 100644
index 6d52c053..00000000
--- a/installers/charm/osm-ng-ui/actions.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Actions tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
diff --git a/installers/charm/osm-ng-ui/charmcraft.yaml b/installers/charm/osm-ng-ui/charmcraft.yaml
deleted file mode 100644
index 072529c6..00000000
--- a/installers/charm/osm-ng-ui/charmcraft.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-
-type: charm
-bases:
- - build-on:
- - name: "ubuntu"
- channel: "22.04"
- run-on:
- - name: "ubuntu"
- channel: "22.04"
-
-parts:
- charm:
- build-packages:
- - git
diff --git a/installers/charm/osm-ng-ui/config.yaml b/installers/charm/osm-ng-ui/config.yaml
deleted file mode 100644
index 31ffd845..00000000
--- a/installers/charm/osm-ng-ui/config.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Configure tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-options:
- # Ingress options
- external-hostname:
- default: ""
- description: |
- The url that will be configured in the Kubernetes ingress.
-
- The easiest way of configuring the external-hostname without a DNS setup is to use
- a wildcard DNS service like nip.io and construct the url like so:
- - ng-ui.127.0.0.1.nip.io (valid within the K8s cluster node)
- - ng-ui.<k8s-node-ip>.nip.io (valid from outside the K8s cluster node)
-
- This option is only applicable when the Kubernetes cluster has nginx ingress configured
- and the charm is related to the nginx-ingress-integrator.
- See more: https://charmhub.io/nginx-ingress-integrator
- type: string
- max-body-size:
- default: 20
- description: Max allowed body-size (for file uploads) in megabytes, set to 0 to
- disable limits.
- type: int
- tls-secret-name:
- description: TLS secret name to use for ingress.
- type: string
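In the charm, these options are typically forwarded to the nginx-ingress-integrator over the ingress relation. The sketch below shows one way to do that with the `IngressRequires` helper from the library included next in this diff; the charm class name and the service port are assumptions, not values taken from this repository.

```python
# Illustrative wiring of the config options above into the ingress relation.
from charms.nginx_ingress_integrator.v0.ingress import IngressRequires
from ops.charm import CharmBase


class ExampleNgUiCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        self.ingress = IngressRequires(
            self,
            {
                # Fall back to the application name if external-hostname is left empty.
                "service-hostname": self.config["external-hostname"] or self.app.name,
                "service-name": self.app.name,
                "service-port": 80,  # assumption: the NG-UI container serves HTTP on port 80
                "max-body-size": self.config["max-body-size"],
                "tls-secret-name": self.config["tls-secret-name"] or "",
            },
        )
```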
diff --git a/installers/charm/osm-ng-ui/lib/charms/nginx_ingress_integrator/v0/ingress.py b/installers/charm/osm-ng-ui/lib/charms/nginx_ingress_integrator/v0/ingress.py
deleted file mode 100644
index be2d762b..00000000
--- a/installers/charm/osm-ng-ui/lib/charms/nginx_ingress_integrator/v0/ingress.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-"""Library for the ingress relation.
-
-This library contains the Requires and Provides classes for handling
-the ingress interface.
-
-Import `IngressRequires` in your charm, with two required options:
- - "self" (the charm itself)
- - config_dict
-
-`config_dict` accepts the following keys:
- - service-hostname (required)
- - service-name (required)
- - service-port (required)
- - additional-hostnames
- - limit-rps
- - limit-whitelist
- - max-body-size
- - owasp-modsecurity-crs
- - path-routes
- - retry-errors
- - rewrite-enabled
- - rewrite-target
- - service-namespace
- - session-cookie-max-age
- - tls-secret-name
-
-See [the config section](https://charmhub.io/nginx-ingress-integrator/configure) for descriptions
-of each, along with the required type.
-
-As an example, add the following to `src/charm.py`:
-```
-from charms.nginx_ingress_integrator.v0.ingress import IngressRequires
-
-# In your charm's `__init__` method.
-self.ingress = IngressRequires(self, {"service-hostname": self.config["external_hostname"],
- "service-name": self.app.name,
- "service-port": 80})
-
-# In your charm's `config-changed` handler.
-self.ingress.update_config({"service-hostname": self.config["external_hostname"]})
-```
-And then add the following to `metadata.yaml`:
-```
-requires:
- ingress:
- interface: ingress
-```
-You _must_ register the IngressRequires class in the `__init__` method rather
-than, for instance, in a config-changed event handler. If the observer is only
-created later, it does not yet exist when the relation-changed event fires, so
-that event is missed.
-"""
-
-import logging
-
-from ops.charm import CharmEvents
-from ops.framework import EventBase, EventSource, Object
-from ops.model import BlockedStatus
-
-# The unique Charmhub library identifier, never change it
-LIBID = "db0af4367506491c91663468fb5caa4c"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 10
-
-logger = logging.getLogger(__name__)
-
-REQUIRED_INGRESS_RELATION_FIELDS = {
- "service-hostname",
- "service-name",
- "service-port",
-}
-
-OPTIONAL_INGRESS_RELATION_FIELDS = {
- "additional-hostnames",
- "limit-rps",
- "limit-whitelist",
- "max-body-size",
- "owasp-modsecurity-crs",
- "path-routes",
- "retry-errors",
- "rewrite-target",
- "rewrite-enabled",
- "service-namespace",
- "session-cookie-max-age",
- "tls-secret-name",
-}
-
-
-class IngressAvailableEvent(EventBase):
- pass
-
-
-class IngressBrokenEvent(EventBase):
- pass
-
-
-class IngressCharmEvents(CharmEvents):
- """Custom charm events."""
-
- ingress_available = EventSource(IngressAvailableEvent)
- ingress_broken = EventSource(IngressBrokenEvent)
-
-
-class IngressRequires(Object):
- """This class defines the functionality for the 'requires' side of the 'ingress' relation.
-
- Hook events observed:
- - relation-changed
- """
-
- def __init__(self, charm, config_dict):
- super().__init__(charm, "ingress")
-
- self.framework.observe(charm.on["ingress"].relation_changed, self._on_relation_changed)
-
- self.config_dict = config_dict
-
- def _config_dict_errors(self, update_only=False):
- """Check our config dict for errors."""
- blocked_message = "Error in ingress relation, check `juju debug-log`"
- unknown = [
- x
- for x in self.config_dict
- if x not in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS
- ]
- if unknown:
- logger.error(
- "Ingress relation error, unknown key(s) in config dictionary found: %s",
- ", ".join(unknown),
- )
- self.model.unit.status = BlockedStatus(blocked_message)
- return True
- if not update_only:
- missing = [x for x in REQUIRED_INGRESS_RELATION_FIELDS if x not in self.config_dict]
- if missing:
- logger.error(
- "Ingress relation error, missing required key(s) in config dictionary: %s",
- ", ".join(sorted(missing)),
- )
- self.model.unit.status = BlockedStatus(blocked_message)
- return True
- return False
-
- def _on_relation_changed(self, event):
- """Handle the relation-changed event."""
- # `self.unit` isn't available here, so use `self.model.unit`.
- if self.model.unit.is_leader():
- if self._config_dict_errors():
- return
- for key in self.config_dict:
- event.relation.data[self.model.app][key] = str(self.config_dict[key])
-
- def update_config(self, config_dict):
- """Allow for updates to relation."""
- if self.model.unit.is_leader():
- self.config_dict = config_dict
- if self._config_dict_errors(update_only=True):
- return
- relation = self.model.get_relation("ingress")
- if relation:
- for key in self.config_dict:
- relation.data[self.model.app][key] = str(self.config_dict[key])
-
-
-class IngressProvides(Object):
- """This class defines the functionality for the 'provides' side of the 'ingress' relation.
-
- Hook events observed:
- - relation-changed
- """
-
- def __init__(self, charm):
- super().__init__(charm, "ingress")
- # Observe the relation-changed hook event and bind
- # self.on_relation_changed() to handle the event.
- self.framework.observe(charm.on["ingress"].relation_changed, self._on_relation_changed)
- self.framework.observe(charm.on["ingress"].relation_broken, self._on_relation_broken)
- self.charm = charm
-
- def _on_relation_changed(self, event):
- """Handle a change to the ingress relation.
-
- Confirm we have the fields we expect to receive."""
- # `self.unit` isn't available here, so use `self.model.unit`.
- if not self.model.unit.is_leader():
- return
-
- ingress_data = {
- field: event.relation.data[event.app].get(field)
- for field in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS
- }
-
- missing_fields = sorted(
- [
- field
- for field in REQUIRED_INGRESS_RELATION_FIELDS
- if ingress_data.get(field) is None
- ]
- )
-
- if missing_fields:
- logger.error(
- "Missing required data fields for ingress relation: {}".format(
- ", ".join(missing_fields)
- )
- )
- self.model.unit.status = BlockedStatus(
- "Missing fields for ingress: {}".format(", ".join(missing_fields))
- )
-
- # Create an event that our charm can use to decide it's okay to
- # configure the ingress.
- self.charm.on.ingress_available.emit()
-
- def _on_relation_broken(self, _):
- """Handle a relation-broken event in the ingress relation."""
- if not self.model.unit.is_leader():
- return
-
- # Create an event that our charm can use to remove the ingress resource.
- self.charm.on.ingress_broken.emit()
diff --git a/installers/charm/osm-ng-ui/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/installers/charm/osm-ng-ui/lib/charms/observability_libs/v1/kubernetes_service_patch.py
deleted file mode 100644
index 506dbf03..00000000
--- a/installers/charm/osm-ng-ui/lib/charms/observability_libs/v1/kubernetes_service_patch.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-
-"""# KubernetesServicePatch Library.
-
-This library is designed to enable developers to more simply patch the Kubernetes Service created
-by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a
-service named after the application in the namespace (named after the Juju model). This service by
-default contains a "placeholder" port, which is 65536/TCP.
-
-When modifying the default set of resources managed by Juju, one must consider the lifecycle of the
-charm. In this case, any modifications to the default service (created during deployment), will be
-overwritten during a charm upgrade.
-
-When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm`
-events which applies the patch to the cluster. This should ensure that the service ports are
-correct throughout the charm's life.
-
-The constructor simply takes a reference to the parent charm, and a list of
-[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the
-service. For information regarding the `lightkube` `ServicePort` model, please visit the
-`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport).
-
-Optionally, a name of the service (in case service name needs to be patched as well), labels,
-selectors, and annotations can be provided as keyword arguments.
-
-## Getting Started
-
-To get started using the library, you just need to fetch the library using `charmcraft`. **Note
-that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.**
-
-```shell
-cd some-charm
-charmcraft fetch-lib charms.observability_libs.v0.kubernetes_service_patch
-echo <<-EOF >> requirements.txt
-lightkube
-lightkube-models
-EOF
-```
-
-Then, to initialise the library:
-
-For `ClusterIP` services:
-
-```python
-# ...
-from charms.observability_libs.v0.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- port = ServicePort(443, name=f"{self.app.name}")
- self.service_patcher = KubernetesServicePatch(self, [port])
- # ...
-```
-
-For `LoadBalancer`/`NodePort` services:
-
-```python
-# ...
-from charms.observability_libs.v0.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666)
- self.service_patcher = KubernetesServicePatch(
- self, [port], "LoadBalancer"
- )
- # ...
-```
-
-Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"`
-
-```python
-# ...
-from charms.observability_libs.v0.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP")
- udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP")
- sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP")
- self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp])
- # ...
-```
-
-Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library
-does not try to make any API calls, or open any files during testing that are unlikely to be
-present, and could break your tests. The easiest way to do this is during your test `setUp`:
-
-```python
-# ...
-
-@patch("charm.KubernetesServicePatch", lambda x, y: None)
-def setUp(self, *unused):
- self.harness = Harness(SomeCharm)
- # ...
-```
-"""
-
-import logging
-from types import MethodType
-from typing import List, Literal
-
-from lightkube import ApiError, Client
-from lightkube.models.core_v1 import ServicePort, ServiceSpec
-from lightkube.models.meta_v1 import ObjectMeta
-from lightkube.resources.core_v1 import Service
-from lightkube.types import PatchType
-from ops.charm import CharmBase
-from ops.framework import Object
-
-logger = logging.getLogger(__name__)
-
-# The unique Charmhub library identifier, never change it
-LIBID = "0042f86d0a874435adef581806cddbbb"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 1
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 1
-
-ServiceType = Literal["ClusterIP", "LoadBalancer"]
-
-
-class KubernetesServicePatch(Object):
- """A utility for patching the Kubernetes service set up by Juju."""
-
- def __init__(
- self,
- charm: CharmBase,
- ports: List[ServicePort],
- service_name: str = None,
- service_type: ServiceType = "ClusterIP",
- additional_labels: dict = None,
- additional_selectors: dict = None,
- additional_annotations: dict = None,
- ):
- """Constructor for KubernetesServicePatch.
-
- Args:
- charm: the charm that is instantiating the library.
- ports: a list of ServicePorts
- service_name: allows setting custom name to the patched service. If none given,
- application name will be used.
- service_type: desired type of K8s service. Default value is in line with ServiceSpec's
- default value.
- additional_labels: Labels to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_selectors: Selectors to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_annotations: Annotations to be added to the kubernetes service.
- """
- super().__init__(charm, "kubernetes-service-patch")
- self.charm = charm
- self.service_name = service_name if service_name else self._app
- self.service = self._service_object(
- ports,
- service_name,
- service_type,
- additional_labels,
- additional_selectors,
- additional_annotations,
- )
-
- # Make mypy type checking happy that self._patch is a method
- assert isinstance(self._patch, MethodType)
- # Ensure this patch is applied during the 'install' and 'upgrade-charm' events
- self.framework.observe(charm.on.install, self._patch)
- self.framework.observe(charm.on.upgrade_charm, self._patch)
-
- def _service_object(
- self,
- ports: List[ServicePort],
- service_name: str = None,
- service_type: ServiceType = "ClusterIP",
- additional_labels: dict = None,
- additional_selectors: dict = None,
- additional_annotations: dict = None,
- ) -> Service:
- """Creates a valid Service representation.
-
- Args:
- ports: a list of ServicePorts
- service_name: allows setting custom name to the patched service. If none given,
- application name will be used.
- service_type: desired type of K8s service. Default value is in line with ServiceSpec's
- default value.
- additional_labels: Labels to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_selectors: Selectors to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_annotations: Annotations to be added to the kubernetes service.
-
- Returns:
- Service: A valid representation of a Kubernetes Service with the correct ports.
- """
- if not service_name:
- service_name = self._app
- labels = {"app.kubernetes.io/name": self._app}
- if additional_labels:
- labels.update(additional_labels)
- selector = {"app.kubernetes.io/name": self._app}
- if additional_selectors:
- selector.update(additional_selectors)
- return Service(
- apiVersion="v1",
- kind="Service",
- metadata=ObjectMeta(
- namespace=self._namespace,
- name=service_name,
- labels=labels,
- annotations=additional_annotations, # type: ignore[arg-type]
- ),
- spec=ServiceSpec(
- selector=selector,
- ports=ports,
- type=service_type,
- ),
- )
-
- def _patch(self, _) -> None:
- """Patch the Kubernetes service created by Juju to map the correct port.
-
- Raises:
- PatchFailed: if patching fails due to lack of permissions, or otherwise.
- """
- if not self.charm.unit.is_leader():
- return
-
- client = Client()
- try:
- if self.service_name != self._app:
- self._delete_and_create_service(client)
- client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE)
- except ApiError as e:
- if e.status.code == 403:
- logger.error("Kubernetes service patch failed: `juju trust` this application.")
- else:
- logger.error("Kubernetes service patch failed: %s", str(e))
- else:
- logger.info("Kubernetes service '%s' patched successfully", self._app)
-
- def _delete_and_create_service(self, client: Client):
- service = client.get(Service, self._app, namespace=self._namespace)
- service.metadata.name = self.service_name # type: ignore[attr-defined]
- service.metadata.resourceVersion = service.metadata.uid = None # type: ignore[attr-defined] # noqa: E501
- client.delete(Service, self._app, namespace=self._namespace)
- client.create(service)
-
- def is_patched(self) -> bool:
- """Reports if the service patch has been applied.
-
- Returns:
- bool: A boolean indicating if the service patch has been applied.
- """
- client = Client()
- # Get the relevant service from the cluster
- service = client.get(Service, name=self.service_name, namespace=self._namespace)
- # Construct a list of expected ports, should the patch be applied
- expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]
- # Construct a list in the same manner, using the fetched service
- fetched_ports = [(p.port, p.targetPort) for p in service.spec.ports] # type: ignore[attr-defined] # noqa: E501
- return expected_ports == fetched_ports
-
- @property
- def _app(self) -> str:
- """Name of the current Juju application.
-
- Returns:
- str: A string containing the name of the current Juju application.
- """
- return self.charm.app.name
-
- @property
- def _namespace(self) -> str:
- """The Kubernetes namespace we're running in.
-
- Returns:
- str: A string containing the name of the current Kubernetes namespace.
- """
- with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
- return f.read().strip()
diff --git a/installers/charm/osm-ng-ui/lib/charms/osm_libs/v0/utils.py b/installers/charm/osm-ng-ui/lib/charms/osm_libs/v0/utils.py
deleted file mode 100644
index d739ba68..00000000
--- a/installers/charm/osm-ng-ui/lib/charms/osm_libs/v0/utils.py
+++ /dev/null
@@ -1,544 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-"""OSM Utils Library.
-
-This library offers some utilities made for but not limited to Charmed OSM.
-
-# Getting started
-
-Execute the following command inside your Charmed Operator folder to fetch the library.
-
-```shell
-charmcraft fetch-lib charms.osm_libs.v0.utils
-```
-
-# CharmError Exception
-
-An exception that takes two arguments, the message and the StatusBase class, which are useful
-for setting the status of the charm when the exception is raised.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import CharmError
-from ops.charm import CharmBase
-from ops.model import BlockedStatus, WaitingStatus
-
-class MyCharm(CharmBase):
-    def _on_config_changed(self, _):
-        try:
-            if not self.config.get("some-option"):
-                raise CharmError("need some-option", BlockedStatus)
-
-            if not self.mysql_ready:
-                raise CharmError("waiting for mysql", WaitingStatus)
-
-            # Do stuff...
-
-        except CharmError as e:
-            self.unit.status = e.status
-```
-
-# Pebble validations
-
-The `check_container_ready` function checks that a container is ready,
-and therefore Pebble is ready.
-
-The `check_service_active` function checks that a service in a container is running.
-
-Both functions raise a CharmError if the validations fail.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import CharmError, check_container_ready, check_service_active
-from ops.charm import CharmBase
-from ops.model import Container
-
-class MyCharm(CharmBase):
-    def _on_config_changed(self, _):
-        try:
-            container: Container = self.unit.get_container("my-container")
-            check_container_ready(container)
-            check_service_active(container, "my-service")
-            # Do stuff...
-
-        except CharmError as e:
-            self.unit.status = e.status
-```
-
-# Debug-mode
-
-The debug-mode allows OSM developers to easily debug OSM modules.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import DebugMode, HostPath
-from ops.charm import CharmBase
-from ops.framework import StoredState
-from ops.model import Container
-
-class MyCharm(CharmBase):
- _stored = StoredState()
-
- def __init__(self, _):
- # ...
- container: Container = self.unit.get_container("my-container")
- hostpaths = [
- HostPath(
- config="module-hostpath",
- container_path="/usr/lib/python3/dist-packages/module"
- ),
- ]
- vscode_workspace_path = "files/vscode-workspace.json"
- self.debug_mode = DebugMode(
- self,
- self._stored,
- container,
- hostpaths,
- vscode_workspace_path,
- )
-
- def _on_update_status(self, _):
- if self.debug_mode.started:
- return
- # ...
-
- def _get_debug_mode_information(self):
- command = self.debug_mode.command
- password = self.debug_mode.password
- return command, password
-```
-
-# More
-
-- Get pod IP with `get_pod_ip()`
-"""
-from dataclasses import dataclass
-import logging
-import secrets
-import socket
-from pathlib import Path
-from typing import List
-
-from lightkube import Client
-from lightkube.models.core_v1 import HostPathVolumeSource, Volume, VolumeMount
-from lightkube.resources.apps_v1 import StatefulSet
-from ops.charm import CharmBase
-from ops.framework import Object, StoredState
-from ops.model import (
- ActiveStatus,
- BlockedStatus,
- Container,
- MaintenanceStatus,
- StatusBase,
- WaitingStatus,
-)
-from ops.pebble import ServiceStatus
-
-# The unique Charmhub library identifier, never change it
-LIBID = "e915908eebee4cdd972d484728adf984"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 5
-
-logger = logging.getLogger(__name__)
-
-
-class CharmError(Exception):
- """Charm Error Exception."""
-
- def __init__(self, message: str, status_class: StatusBase = BlockedStatus) -> None:
- self.message = message
- self.status_class = status_class
- self.status = status_class(message)
-
-
-def check_container_ready(container: Container) -> None:
- """Check Pebble has started in the container.
-
- Args:
- container (Container): Container to be checked.
-
- Raises:
- CharmError: if container is not ready.
- """
- if not container.can_connect():
- raise CharmError("waiting for pebble to start", MaintenanceStatus)
-
-
-def check_service_active(container: Container, service_name: str) -> None:
- """Check if the service is running.
-
- Args:
- container (Container): Container to be checked.
- service_name (str): Name of the service to check.
-
- Raises:
- CharmError: if the service is not running.
- """
- if service_name not in container.get_plan().services:
- raise CharmError(f"{service_name} service not configured yet", WaitingStatus)
-
- if container.get_service(service_name).current != ServiceStatus.ACTIVE:
- raise CharmError(f"{service_name} service is not running")
-
-
-def get_pod_ip() -> str:
- """Get Kubernetes Pod IP.
-
- Returns:
- str: The IP of the Pod.
- """
- return socket.gethostbyname(socket.gethostname())
-
-
-_DEBUG_SCRIPT = r"""#!/bin/bash
-# Install SSH
-
-function download_code(){{
- wget https://go.microsoft.com/fwlink/?LinkID=760868 -O code.deb
-}}
-
-function setup_envs(){{
- grep "source /debug.envs" /root/.bashrc || echo "source /debug.envs" | tee -a /root/.bashrc
-}}
-function setup_ssh(){{
- apt install ssh -y
- cat /etc/ssh/sshd_config |
- grep -E '^PermitRootLogin yes$$' || (
- echo PermitRootLogin yes |
- tee -a /etc/ssh/sshd_config
- )
- service ssh stop
- sleep 3
- service ssh start
- usermod --password $(echo {} | openssl passwd -1 -stdin) root
-}}
-
-function setup_code(){{
- apt install libasound2 -y
- (dpkg -i code.deb || apt-get install -f -y || apt-get install -f -y) && echo Code installed successfully
- code --install-extension ms-python.python --user-data-dir /root
- mkdir -p /root/.vscode-server
- cp -R /root/.vscode/extensions /root/.vscode-server/extensions
-}}
-
-export DEBIAN_FRONTEND=noninteractive
-apt update && apt install wget -y
-download_code &
-setup_ssh &
-setup_envs
-wait
-setup_code &
-wait
-"""
-
-
-@dataclass
-class SubModule:
- """Represent RO Submodules."""
- sub_module_path: str
- container_path: str
-
-
-class HostPath:
- """Represents a hostpath."""
- def __init__(self, config: str, container_path: str, submodules: dict = None) -> None:
- mount_path_items = config.split("-")
- mount_path_items.reverse()
- self.mount_path = "/" + "/".join(mount_path_items)
- self.config = config
- self.sub_module_dict = {}
- if submodules:
- for submodule in submodules.keys():
- self.sub_module_dict[submodule] = SubModule(
- sub_module_path=self.mount_path + "/" + submodule + "/" + submodules[submodule].split("/")[-1],
- container_path=submodules[submodule],
- )
- else:
- self.container_path = container_path
- self.module_name = container_path.split("/")[-1]
-
-class DebugMode(Object):
- """Class to handle the debug-mode."""
-
- def __init__(
- self,
- charm: CharmBase,
- stored: StoredState,
- container: Container,
- hostpaths: List[HostPath] = [],
- vscode_workspace_path: str = "files/vscode-workspace.json",
- ) -> None:
- super().__init__(charm, "debug-mode")
-
- self.charm = charm
- self._stored = stored
- self.hostpaths = hostpaths
- self.vscode_workspace = Path(vscode_workspace_path).read_text()
- self.container = container
-
- self._stored.set_default(
- debug_mode_started=False,
- debug_mode_vscode_command=None,
- debug_mode_password=None,
- )
-
- self.framework.observe(self.charm.on.config_changed, self._on_config_changed)
- self.framework.observe(self.charm.on[container.name].pebble_ready, self._on_config_changed)
- self.framework.observe(self.charm.on.update_status, self._on_update_status)
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- if not self.charm.unit.is_leader():
- return
-
- debug_mode_enabled = self.charm.config.get("debug-mode", False)
- action = self.enable if debug_mode_enabled else self.disable
- action()
-
- def _on_update_status(self, _) -> None:
- """Handler for the update-status event."""
- if not self.charm.unit.is_leader() or not self.started:
- return
-
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- @property
- def started(self) -> bool:
- """Indicates whether the debug-mode has started or not."""
- return self._stored.debug_mode_started
-
- @property
- def command(self) -> str:
- """Command to launch vscode."""
- return self._stored.debug_mode_vscode_command
-
- @property
- def password(self) -> str:
- """SSH password."""
- return self._stored.debug_mode_password
-
- def enable(self, service_name: str = None) -> None:
- """Enable debug-mode.
-
- This function mounts hostpaths of the OSM modules (if set), and
- configures the container so it can be easily debugged. The setup
- includes the configuration of SSH, environment variables, and
- VSCode workspace and plugins.
-
- Args:
- service_name (str, optional): Pebble service name which has the desired environment
- variables. Mandatory if there is more than one Pebble service configured.
- """
- hostpaths_to_reconfigure = self._hostpaths_to_reconfigure()
- if self.started and not hostpaths_to_reconfigure:
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
- return
-
- logger.debug("enabling debug-mode")
-
-        # Mount hostpaths if set.
-        # If any hostpath needs to be (re)configured, the statefulset is patched and
-        # restarted, so we return immediately. After the restart there is nothing left
-        # to reconfigure, so execution continues below and the debug-mode is set up.
- if hostpaths_to_reconfigure:
- self.charm.unit.status = MaintenanceStatus("debug-mode: configuring hostpaths")
- self._configure_hostpaths(hostpaths_to_reconfigure)
- return
-
- self.charm.unit.status = MaintenanceStatus("debug-mode: starting")
- password = secrets.token_hex(8)
- self._setup_debug_mode(
- password,
- service_name,
- mounted_hostpaths=[hp for hp in self.hostpaths if self.charm.config.get(hp.config)],
- )
-
- self._stored.debug_mode_vscode_command = self._get_vscode_command(get_pod_ip())
- self._stored.debug_mode_password = password
- self._stored.debug_mode_started = True
- logger.info("debug-mode is ready")
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- def disable(self) -> None:
- """Disable debug-mode."""
- logger.debug("disabling debug-mode")
- current_status = self.charm.unit.status
- hostpaths_unmounted = self._unmount_hostpaths()
-
- if not self._stored.debug_mode_started:
- return
- self._stored.debug_mode_started = False
- self._stored.debug_mode_vscode_command = None
- self._stored.debug_mode_password = None
-
- if not hostpaths_unmounted:
- self.charm.unit.status = current_status
- self._restart()
-
- def _hostpaths_to_reconfigure(self) -> List[HostPath]:
- hostpaths_to_reconfigure: List[HostPath] = []
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
- volumes = statefulset.spec.template.spec.volumes
-
- for hostpath in self.hostpaths:
-            hostpath_is_set = bool(self.charm.config.get(hostpath.config))
- hostpath_already_configured = next(
- (True for volume in volumes if volume.name == hostpath.config), False
- )
- if hostpath_is_set != hostpath_already_configured:
- hostpaths_to_reconfigure.append(hostpath)
-
- return hostpaths_to_reconfigure
-
- def _setup_debug_mode(
- self,
- password: str,
- service_name: str = None,
- mounted_hostpaths: List[HostPath] = [],
- ) -> None:
- services = self.container.get_plan().services
- if not service_name and len(services) != 1:
- raise Exception("Cannot start debug-mode: please set the service_name")
-
- service = None
- if not service_name:
- service_name, service = services.popitem()
- if not service:
- service = services.get(service_name)
-
- logger.debug(f"getting environment variables from service {service_name}")
- environment = service.environment
- environment_file_content = "\n".join(
- [f'export {key}="{value}"' for key, value in environment.items()]
- )
- logger.debug(f"pushing environment file to {self.container.name} container")
- self.container.push("/debug.envs", environment_file_content)
-
- # Push VSCode workspace
- logger.debug(f"pushing vscode workspace to {self.container.name} container")
- self.container.push("/debug.code-workspace", self.vscode_workspace)
-
- # Execute debugging script
- logger.debug(f"pushing debug-mode setup script to {self.container.name} container")
- self.container.push("/debug.sh", _DEBUG_SCRIPT.format(password), permissions=0o777)
- logger.debug(f"executing debug-mode setup script in {self.container.name} container")
- self.container.exec(["/debug.sh"]).wait_output()
- logger.debug(f"stopping service {service_name} in {self.container.name} container")
- self.container.stop(service_name)
-
- # Add symlinks to mounted hostpaths
- for hostpath in mounted_hostpaths:
- logger.debug(f"adding symlink for {hostpath.config}")
- if len(hostpath.sub_module_dict) > 0:
- for sub_module in hostpath.sub_module_dict.keys():
- self.container.exec(["rm", "-rf", hostpath.sub_module_dict[sub_module].container_path]).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- hostpath.sub_module_dict[sub_module].sub_module_path,
- hostpath.sub_module_dict[sub_module].container_path,
- ]
- )
-
- else:
- self.container.exec(["rm", "-rf", hostpath.container_path]).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- f"{hostpath.mount_path}/{hostpath.module_name}",
- hostpath.container_path,
- ]
- )
-
- def _configure_hostpaths(self, hostpaths: List[HostPath]):
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in hostpaths:
- if self.charm.config.get(hostpath.config):
- self._add_hostpath_to_statefulset(hostpath, statefulset)
- else:
- self._delete_hostpath_from_statefulset(hostpath, statefulset)
-
- client.replace(statefulset)
-
- def _unmount_hostpaths(self) -> bool:
- client = Client()
- hostpath_unmounted = False
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in self.hostpaths:
- if self._delete_hostpath_from_statefulset(hostpath, statefulset):
- hostpath_unmounted = True
-
- if hostpath_unmounted:
- client.replace(statefulset)
-
- return hostpath_unmounted
-
- def _add_hostpath_to_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- # Add volume
- logger.debug(f"adding volume {hostpath.config} to {self.charm.app.name} statefulset")
- volume = Volume(
- hostpath.config,
- hostPath=HostPathVolumeSource(
- path=self.charm.config[hostpath.config],
- type="Directory",
- ),
- )
- statefulset.spec.template.spec.volumes.append(volume)
-
- # Add volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
-
- logger.debug(
- f"adding volumeMount {hostpath.config} to {self.container.name} container"
- )
- statefulset_container.volumeMounts.append(
- VolumeMount(mountPath=hostpath.mount_path, name=hostpath.config)
- )
-
- def _delete_hostpath_from_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- hostpath_unmounted = False
- for volume in statefulset.spec.template.spec.volumes:
-
- if hostpath.config != volume.name:
- continue
-
- # Remove volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
- for volume_mount in statefulset_container.volumeMounts:
- if volume_mount.name != hostpath.config:
- continue
-
- logger.debug(
- f"removing volumeMount {hostpath.config} from {self.container.name} container"
- )
- statefulset_container.volumeMounts.remove(volume_mount)
-
- # Remove volume
- logger.debug(
- f"removing volume {hostpath.config} from {self.charm.app.name} statefulset"
- )
- statefulset.spec.template.spec.volumes.remove(volume)
-
- hostpath_unmounted = True
- return hostpath_unmounted
-
- def _get_vscode_command(
- self,
- pod_ip: str,
- user: str = "root",
- workspace_path: str = "/debug.code-workspace",
- ) -> str:
- return f"code --remote ssh-remote+{user}@{pod_ip} {workspace_path}"
-
- def _restart(self):
- self.container.exec(["kill", "-HUP", "1"])
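Distilled from the docstring above and from the way the osm-ng-ui charm later in this change combines these helpers, a short sketch of the usual status-check pattern; the container and service names are placeholders:

```python
# Sketch of the usual status-check pattern built on the deleted osm_libs utilities.
# "my-container" and "my-service" are placeholder names.
from charms.osm_libs.v0.utils import CharmError, check_container_ready, check_service_active
from ops.charm import CharmBase
from ops.model import ActiveStatus, Container


class MyK8sCharm(CharmBase):
    def _on_update_status(self, _) -> None:
        try:
            container: Container = self.unit.get_container("my-container")
            check_container_ready(container)  # raises CharmError with MaintenanceStatus
            check_service_active(container, "my-service")  # raises CharmError if not active
            self.unit.status = ActiveStatus()
        except CharmError as e:
            self.unit.status = e.status
```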
diff --git a/installers/charm/osm-ng-ui/lib/charms/osm_nbi/v0/nbi.py b/installers/charm/osm-ng-ui/lib/charms/osm_nbi/v0/nbi.py
deleted file mode 100644
index 130b6faa..00000000
--- a/installers/charm/osm-ng-ui/lib/charms/osm_nbi/v0/nbi.py
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# Learn more at: https://juju.is/docs/sdk
-
-"""Nbi library.
-
-This [library](https://juju.is/docs/sdk/libraries) implements both sides of the
-`nbi` [interface](https://juju.is/docs/sdk/relations).
-
-The *provider* side of this interface is implemented by the
-[osm-nbi Charmed Operator](https://charmhub.io/osm-nbi).
-
-Any Charmed Operator that *requires* NBI for providing its
-service should implement the *requirer* side of this interface.
-
-In a nutshell using this library to implement a Charmed Operator *requiring*
-NBI would look like
-
-```
-$ charmcraft fetch-lib charms.osm_nbi.v0.nbi
-```
-
-`metadata.yaml`:
-
-```
-requires:
- nbi:
- interface: nbi
- limit: 1
-```
-
-`src/charm.py`:
-
-```
-from charms.osm_nbi.v0.nbi import NbiRequires
-from ops.charm import CharmBase
-from ops.model import BlockedStatus
-
-
-class MyCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- self.nbi = NbiRequires(self)
- self.framework.observe(
- self.on["nbi"].relation_changed,
- self._on_nbi_relation_changed,
- )
- self.framework.observe(
- self.on["nbi"].relation_broken,
- self._on_nbi_relation_broken,
- )
-
-    def _on_nbi_relation_changed(self, event):
-        # Get NBI host and port
-        host: str = self.nbi.host
-        port: int = self.nbi.port
-        # host => "osm-nbi"
-        # port => 9999
-
-    def _on_nbi_relation_broken(self, event):
- # Stop service
- # ...
- self.unit.status = BlockedStatus("need nbi relation")
-```
-
-You can file bugs
-[here](https://osm.etsi.org/bugzilla/enter_bug.cgi), selecting the `devops` module!
-"""
-from typing import Optional
-
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, Object
-from ops.model import Relation
-
-
-# The unique Charmhub library identifier, never change it
-LIBID = "8c888f7c869949409e12c16d78ec068b"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 1
-
-NBI_HOST_APP_KEY = "host"
-NBI_PORT_APP_KEY = "port"
-
-
-class NbiRequires(Object): # pragma: no cover
- """Requires-side of the Nbi relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "nbi") -> None:
- super().__init__(charm, endpoint_name)
- self.charm = charm
- self._endpoint_name = endpoint_name
-
- @property
- def host(self) -> str:
- """Get nbi hostname."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- relation.data[relation.app].get(NBI_HOST_APP_KEY)
- if relation and relation.app
- else None
- )
-
- @property
- def port(self) -> int:
- """Get nbi port number."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- int(relation.data[relation.app].get(NBI_PORT_APP_KEY))
- if relation and relation.app
- else None
- )
-
-
-class NbiProvides(Object):
- """Provides-side of the Nbi relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "nbi") -> None:
- super().__init__(charm, endpoint_name)
- self._endpoint_name = endpoint_name
-
- def set_host_info(self, host: str, port: int, relation: Optional[Relation] = None) -> None:
- """Set Nbi host and port.
-
-        This function writes to the application data of the relation; therefore,
-        only the unit leader can call it.
-
- Args:
- host (str): Nbi hostname or IP address.
- port (int): Nbi port.
- relation (Optional[Relation]): Relation to update.
- If not specified, all relations will be updated.
-
- Raises:
- Exception: if a non-leader unit calls this function.
- """
- if not self.model.unit.is_leader():
-            raise Exception("only the leader can set host information.")
-
- if relation:
- self._update_relation_data(host, port, relation)
- return
-
- for relation in self.model.relations[self._endpoint_name]:
- self._update_relation_data(host, port, relation)
-
- def _update_relation_data(self, host: str, port: int, relation: Relation) -> None:
- """Update data in relation if needed."""
- relation.data[self.model.app][NBI_HOST_APP_KEY] = host
- relation.data[self.model.app][NBI_PORT_APP_KEY] = str(port)
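The docstring above only illustrates the requirer side. As a hedged sketch of the provider side, a hypothetical charm publishing its host and port through `NbiProvides` (the charm class name and port are illustrative; only the leader may call `set_host_info`):

```python
# Provider-side sketch for the deleted nbi library.
# The charm class name and the port value are illustrative; only the leader may publish data.
from charms.osm_nbi.v0.nbi import NbiProvides
from ops.charm import CharmBase
from ops.main import main


class HypotheticalNbiProviderCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        self.nbi_provider = NbiProvides(self)
        self.framework.observe(
            self.on["nbi"].relation_joined, self._on_nbi_relation_joined
        )

    def _on_nbi_relation_joined(self, event):
        if self.unit.is_leader():
            # Writes "host" and "port" into the relation's application data bag.
            self.nbi_provider.set_host_info(self.app.name, 9999, event.relation)


if __name__ == "__main__":
    main(HypotheticalNbiProviderCharm)
```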
diff --git a/installers/charm/osm-ng-ui/metadata.yaml b/installers/charm/osm-ng-ui/metadata.yaml
deleted file mode 100644
index be03f247..00000000
--- a/installers/charm/osm-ng-ui/metadata.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Overview on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-name: osm-ng-ui
-
-# The following metadata are human-readable and will be published prominently on Charmhub.
-
-display-name: OSM NG-UI
-
-summary: OSM Next-generation User Interface (NG-UI)
-
-description: |
- A Kubernetes operator that deploys the Next-generation User Interface of OSM.
-
- This charm doesn't make sense on its own.
- See more:
- - https://charmhub.io/osm
-
-containers:
- ng-ui:
- resource: ng-ui-image
-
-# This file populates the Resources tab on Charmhub.
-
-resources:
- ng-ui-image:
- type: oci-image
- description: OCI image for ng-ui
- upstream-source: opensourcemano/ng-ui
-
-requires:
- ingress:
- interface: ingress
- limit: 1
- nbi:
- interface: nbi
diff --git a/installers/charm/osm-ng-ui/pyproject.toml b/installers/charm/osm-ng-ui/pyproject.toml
deleted file mode 100644
index 16cf0f4b..00000000
--- a/installers/charm/osm-ng-ui/pyproject.toml
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-# Testing tools configuration
-[tool.coverage.run]
-branch = true
-
-[tool.coverage.report]
-show_missing = true
-
-[tool.pytest.ini_options]
-minversion = "6.0"
-log_cli_level = "INFO"
-
-# Formatting tools configuration
-[tool.black]
-line-length = 99
-target-version = ["py38"]
-
-[tool.isort]
-profile = "black"
-
-# Linting tools configuration
-[tool.flake8]
-max-line-length = 99
-max-doc-length = 99
-max-complexity = 10
-exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
-select = ["E", "W", "F", "C", "N", "R", "D", "H"]
-# Ignore W503, E501 because using black creates errors with this
-# Ignore D107 Missing docstring in __init__
-ignore = ["W503", "E501", "D107"]
-# D100, D101, D102, D103: Ignore missing docstrings in tests
-per-file-ignores = ["tests/*:D100,D101,D102,D103,D104"]
-docstring-convention = "google"
diff --git a/installers/charm/osm-ng-ui/requirements.txt b/installers/charm/osm-ng-ui/requirements.txt
deleted file mode 100644
index 761edd85..00000000
--- a/installers/charm/osm-ng-ui/requirements.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-ops < 2.2
-lightkube
-lightkube-models
-git+https://github.com/charmed-osm/config-validator/
diff --git a/installers/charm/osm-ng-ui/src/charm.py b/installers/charm/osm-ng-ui/src/charm.py
deleted file mode 100755
index ca517b31..00000000
--- a/installers/charm/osm-ng-ui/src/charm.py
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# Learn more at: https://juju.is/docs/sdk
-
-"""OSM NG-UI charm.
-
-See more: https://charmhub.io/osm
-"""
-
-import logging
-import re
-from typing import Any, Dict
-
-from charms.nginx_ingress_integrator.v0.ingress import IngressRequires
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from charms.osm_libs.v0.utils import (
- CharmError,
- check_container_ready,
- check_service_active,
-)
-from charms.osm_nbi.v0.nbi import NbiRequires
-from lightkube.models.core_v1 import ServicePort
-from ops.charm import CharmBase
-from ops.framework import StoredState
-from ops.main import main
-from ops.model import ActiveStatus, BlockedStatus, Container
-
-SERVICE_PORT = 80
-
-logger = logging.getLogger(__name__)
-
-
-class OsmNgUiCharm(CharmBase):
- """OSM NG-UI Kubernetes sidecar charm."""
-
- _stored = StoredState()
-
- def __init__(self, *args):
- super().__init__(*args)
- self.ingress = IngressRequires(
- self,
- {
- "service-hostname": self.external_hostname,
- "service-name": self.app.name,
- "service-port": SERVICE_PORT,
- },
- )
- self._observe_charm_events()
- self._patch_k8s_service()
- self._stored.set_default(default_site_patched=False)
- self.nbi = NbiRequires(self)
- self.container: Container = self.unit.get_container("ng-ui")
-
- @property
- def external_hostname(self) -> str:
- """External hostname property.
-
- Returns:
- str: the external hostname from config.
- If not set, return the ClusterIP service name.
- """
- return self.config.get("external-hostname") or self.app.name
-
- # ---------------------------------------------------------------------------
- # Handlers for Charm Events
- # ---------------------------------------------------------------------------
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- try:
- self._validate_config()
- self._check_relations()
- # Check if the container is ready.
- # Eventually it will become ready after the first pebble-ready event.
- check_container_ready(self.container)
-
- self._configure_service(self.container)
- self._update_ingress_config()
- # Update charm status
- self._on_update_status()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _on_update_status(self, _=None) -> None:
- """Handler for the update-status event."""
- try:
- self._check_relations()
- check_container_ready(self.container)
- check_service_active(self.container, "ng-ui")
- self.unit.status = ActiveStatus()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _on_nbi_relation_broken(self, _) -> None:
- """Handler for the nbi relation broken event."""
- # Check Pebble has started in the container
- try:
- check_container_ready(self.container)
- check_service_active(self.container, "ng-ui")
- self.container.stop("ng-ui")
- self._stored.default_site_patched = False
- except CharmError:
- pass
- finally:
- self.unit.status = BlockedStatus("need nbi relation")
-
- # ---------------------------------------------------------------------------
- # Validation and configuration and more
- # ---------------------------------------------------------------------------
-
- def _patch_k8s_service(self) -> None:
- port = ServicePort(SERVICE_PORT, name=f"{self.app.name}")
- self.service_patcher = KubernetesServicePatch(self, [port])
-
- def _observe_charm_events(self) -> None:
- event_handler_mapping = {
- # Core lifecycle events
- self.on.ng_ui_pebble_ready: self._on_config_changed,
- self.on.config_changed: self._on_config_changed,
- self.on.update_status: self._on_update_status,
- # Relation events
- self.on["nbi"].relation_changed: self._on_config_changed,
- self.on["nbi"].relation_broken: self._on_nbi_relation_broken,
- }
- for event, handler in event_handler_mapping.items():
- self.framework.observe(event, handler)
-
- def _validate_config(self) -> None:
- """Validate charm configuration.
-
- Raises:
- CharmError: if charm configuration is invalid.
- """
- logger.debug("validating charm config")
-
- def _check_relations(self) -> None:
- """Validate charm relations.
-
- Raises:
- CharmError: if charm configuration is invalid.
- """
- logger.debug("check for missing relations")
-
- if not self.nbi.host or not self.nbi.port:
- raise CharmError("need nbi relation")
-
- def _update_ingress_config(self) -> None:
- """Update ingress config in relation."""
- ingress_config = {
- "service-hostname": self.external_hostname,
- "max-body-size": self.config["max-body-size"],
- }
- if "tls-secret-name" in self.config:
- ingress_config["tls-secret-name"] = self.config["tls-secret-name"]
- logger.debug(f"updating ingress-config: {ingress_config}")
- self.ingress.update_config(ingress_config)
-
- def _configure_service(self, container: Container) -> None:
- """Add Pebble layer with the ng-ui service."""
- logger.debug(f"configuring {self.app.name} service")
- self._patch_default_site(container)
- container.add_layer("ng-ui", self._get_layer(), combine=True)
- container.replan()
-
- def _get_layer(self) -> Dict[str, Any]:
- """Get layer for Pebble."""
- return {
- "summary": "ng-ui layer",
- "description": "pebble config layer for ng-ui",
- "services": {
- "ng-ui": {
- "override": "replace",
- "summary": "ng-ui service",
- "command": 'nginx -g "daemon off;"',
- "startup": "enabled",
- }
- },
- }
-
- def _patch_default_site(self, container: Container) -> None:
- max_body_size = self.config.get("max-body-size")
- if (
- self._stored.default_site_patched
- and max_body_size == self._stored.default_site_max_body_size
- ):
- return
- default_site_config = container.pull("/etc/nginx/sites-available/default").read()
- default_site_config = re.sub(
- "client_max_body_size .*\n",
- f"client_max_body_size {max_body_size}M;\n",
- default_site_config,
- )
- default_site_config = re.sub(
- "proxy_pass .*\n",
- f"proxy_pass http://{self.nbi.host}:{self.nbi.port};\n",
- default_site_config,
- )
- container.push("/etc/nginx/sites-available/default", default_site_config)
- self._stored.default_site_patched = True
- self._stored.default_site_max_body_size = max_body_size
-
-
-if __name__ == "__main__": # pragma: no cover
- main(OsmNgUiCharm)
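To make the effect of `_patch_default_site` above concrete, a standalone sketch that applies the same two `re.sub` substitutions to a sample nginx default-site snippet (the snippet, body size, and NBI host/port are illustrative, mirroring the fixture in the unit tests further below):

```python
# Standalone sketch of the substitutions performed by _patch_default_site.
# The config snippet, body size, and NBI host/port are illustrative values.
import re

default_site = (
    "client_max_body_size 50M;\n"
    "location /osm {\n"
    "    proxy_pass https://nbi:9999;\n"
    "}\n"
)

max_body_size, nbi_host, nbi_port = 25, "osm-nbi", 9999

# Rewrite the maximum allowed request body size taken from charm config.
default_site = re.sub(
    "client_max_body_size .*\n", f"client_max_body_size {max_body_size}M;\n", default_site
)
# Point the reverse proxy at the host/port published over the nbi relation.
default_site = re.sub(
    "proxy_pass .*\n", f"proxy_pass http://{nbi_host}:{nbi_port};\n", default_site
)
print(default_site)
# client_max_body_size 25M;
# location /osm {
#     proxy_pass http://osm-nbi:9999;
# }
```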
diff --git a/installers/charm/osm-ng-ui/tests/integration/test_charm.py b/installers/charm/osm-ng-ui/tests/integration/test_charm.py
deleted file mode 100644
index 3f87078f..00000000
--- a/installers/charm/osm-ng-ui/tests/integration/test_charm.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2023 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# Learn more about testing at: https://juju.is/docs/sdk/testing
-
-import asyncio
-import logging
-import shlex
-from pathlib import Path
-
-import pytest
-import yaml
-from pytest_operator.plugin import OpsTest
-
-logger = logging.getLogger(__name__)
-
-METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
-NG_UI_APP = METADATA["name"]
-
-# Required charms (needed by NG UI)
-NBI_CHARM = "osm-nbi"
-NBI_APP = "nbi"
-KAFKA_CHARM = "kafka-k8s"
-KAFKA_APP = "kafka"
-MONGO_DB_CHARM = "mongodb-k8s"
-MONGO_DB_APP = "mongodb"
-PROMETHEUS_CHARM = "osm-prometheus"
-PROMETHEUS_APP = "prometheus"
-KEYSTONE_CHARM = "osm-keystone"
-KEYSTONE_APP = "keystone"
-MYSQL_CHARM = "charmed-osm-mariadb-k8s"
-MYSQL_APP = "mysql"
-ZOOKEEPER_CHARM = "zookeeper-k8s"
-ZOOKEEPER_APP = "zookeeper"
-
-INGRESS_CHARM = "nginx-ingress-integrator"
-INGRESS_APP = "ingress"
-
-ALL_APPS = [
- NBI_APP,
- NG_UI_APP,
- KAFKA_APP,
- MONGO_DB_APP,
- PROMETHEUS_APP,
- KEYSTONE_APP,
- MYSQL_APP,
- ZOOKEEPER_APP,
-]
-
-
-@pytest.mark.abort_on_fail
-async def test_ng_ui_is_deployed(ops_test: OpsTest):
- ng_ui_charm = await ops_test.build_charm(".")
- ng_ui_resources = {"ng-ui-image": METADATA["resources"]["ng-ui-image"]["upstream-source"]}
- keystone_image = "opensourcemano/keystone:testing-daily"
- keystone_deploy_cmd = f"juju deploy -m {ops_test.model_full_name} {KEYSTONE_CHARM} {KEYSTONE_APP} --resource keystone-image={keystone_image} --channel=latest/beta --series jammy"
-
- await asyncio.gather(
- ops_test.model.deploy(
- ng_ui_charm, resources=ng_ui_resources, application_name=NG_UI_APP, series="jammy"
- ),
- ops_test.model.deploy(
- NBI_CHARM, application_name=NBI_APP, channel="latest/beta", series="jammy"
- ),
- ops_test.model.deploy(KAFKA_CHARM, application_name=KAFKA_APP, channel="stable"),
- ops_test.model.deploy(MONGO_DB_CHARM, application_name=MONGO_DB_APP, channel="5/edge"),
- ops_test.model.deploy(PROMETHEUS_CHARM, application_name=PROMETHEUS_APP, channel="stable"),
- ops_test.model.deploy(ZOOKEEPER_CHARM, application_name=ZOOKEEPER_APP, channel="stable"),
- ops_test.model.deploy(MYSQL_CHARM, application_name=MYSQL_APP, channel="stable"),
- # Keystone is deployed separately because the juju python library has a bug where resources
- # are not properly deployed. See https://github.com/juju/python-libjuju/issues/766
- ops_test.run(*shlex.split(keystone_deploy_cmd), check=True),
- )
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=ALL_APPS, timeout=300)
- logger.info("Adding relations for other components")
- await asyncio.gather(
- ops_test.model.relate(MYSQL_APP, KEYSTONE_APP),
- ops_test.model.relate(KAFKA_APP, ZOOKEEPER_APP),
- ops_test.model.relate(KEYSTONE_APP, NBI_APP),
- ops_test.model.relate(KAFKA_APP, NBI_APP),
- ops_test.model.relate("{}:mongodb".format(NBI_APP), "{}:database".format(MONGO_DB_APP)),
- ops_test.model.relate(PROMETHEUS_APP, NBI_APP),
- )
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=ALL_APPS, timeout=300)
-
- assert ops_test.model.applications[NG_UI_APP].status == "blocked"
- unit = ops_test.model.applications[NG_UI_APP].units[0]
- assert unit.workload_status_message == "need nbi relation"
-
- logger.info("Adding relations for NG-UI")
- await ops_test.model.relate(NG_UI_APP, NBI_APP)
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=ALL_APPS, status="active", timeout=300)
-
-
-@pytest.mark.abort_on_fail
-async def test_ng_ui_scales_up(ops_test: OpsTest):
- logger.info("Scaling up osm-ng-ui")
- expected_units = 3
- assert len(ops_test.model.applications[NG_UI_APP].units) == 1
- await ops_test.model.applications[NG_UI_APP].scale(expected_units)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=[NG_UI_APP], status="active", wait_for_exact_units=expected_units
- )
-
-
-@pytest.mark.abort_on_fail
-async def test_ng_ui_blocks_without_relation(ops_test: OpsTest):
- await asyncio.gather(ops_test.model.applications[NBI_APP].remove_relation(NBI_APP, NG_UI_APP))
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[NG_UI_APP])
- assert ops_test.model.applications[NG_UI_APP].status == "blocked"
- for unit in ops_test.model.applications[NG_UI_APP].units:
- assert unit.workload_status_message == "need nbi relation"
- await ops_test.model.relate(NG_UI_APP, NBI_APP)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=ALL_APPS, status="active")
-
-
-@pytest.mark.abort_on_fail
-async def test_ng_ui_integration_ingress(ops_test: OpsTest):
-    # Temporary workaround due to python-libjuju 2.9.42.2 bug fixed in
- # https://github.com/juju/python-libjuju/pull/854
- # To be replaced when juju version 2.9.43 is used.
- cmd = f"juju deploy {INGRESS_CHARM} {INGRESS_APP} --channel stable"
- await ops_test.run(*shlex.split(cmd), check=True)
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=ALL_APPS + [INGRESS_APP])
-
- await ops_test.model.relate(NG_UI_APP, INGRESS_APP)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=ALL_APPS + [INGRESS_APP], status="active")
diff --git a/installers/charm/osm-ng-ui/tests/unit/test_charm.py b/installers/charm/osm-ng-ui/tests/unit/test_charm.py
deleted file mode 100644
index f4d45711..00000000
--- a/installers/charm/osm-ng-ui/tests/unit/test_charm.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# Learn more about testing at: https://juju.is/docs/sdk/testing
-
-import pytest
-from ops.model import ActiveStatus, BlockedStatus
-from ops.testing import Harness
-from pytest_mock import MockerFixture
-
-from charm import CharmError, OsmNgUiCharm, check_service_active
-
-container_name = "ng-ui"
-service_name = "ng-ui"
-
-sites_default = """
-server {
- listen 80;
- server_name localhost;
- root /usr/share/nginx/html;
- index index.html index.htm;
- client_max_body_size 50M;
-
- location /osm {
- proxy_pass https://nbi:9999;
- proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
- proxy_set_header Accept-Encoding "";
- }
-
- location / {
- try_files $uri $uri/ /index.html;
- }
-}
-"""
-
-
-@pytest.fixture
-def harness(mocker: MockerFixture):
- mocker.patch("charm.KubernetesServicePatch", lambda x, y: None)
- harness = Harness(OsmNgUiCharm)
- harness.begin()
- container = harness.charm.unit.get_container("ng-ui")
- harness.set_can_connect(container, True)
- container.push("/etc/nginx/sites-available/default", sites_default, make_dirs=True)
- yield harness
- harness.cleanup()
-
-
-def test_missing_relations(harness: Harness):
- harness.charm.on.config_changed.emit()
- assert type(harness.charm.unit.status) == BlockedStatus
- assert harness.charm.unit.status.message == "need nbi relation"
-
-
-def test_ready(harness: Harness):
- _add_nbi_relation(harness)
- assert harness.charm.unit.status == ActiveStatus()
-
-
-def test_container_stops_after_relation_broken(harness: Harness):
- harness.charm.on[container_name].pebble_ready.emit(container_name)
- container = harness.charm.unit.get_container(container_name)
- relation_id = _add_nbi_relation(harness)
- check_service_active(container, service_name)
- harness.remove_relation(relation_id)
- with pytest.raises(CharmError):
- check_service_active(container, service_name)
- assert type(harness.charm.unit.status) == BlockedStatus
- assert harness.charm.unit.status.message == "need nbi relation"
-
-
-def _add_nbi_relation(harness: Harness):
- relation_id = harness.add_relation("nbi", "nbi")
- harness.add_relation_unit(relation_id, "nbi/0")
- harness.update_relation_data(relation_id, "nbi", {"host": "nbi", "port": "9999"})
- return relation_id
diff --git a/installers/charm/osm-ng-ui/tox.ini b/installers/charm/osm-ng-ui/tox.ini
deleted file mode 100644
index 8c614b8c..00000000
--- a/installers/charm/osm-ng-ui/tox.ini
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-[tox]
-skipsdist=True
-skip_missing_interpreters = True
-envlist = lint, unit, integration
-
-[vars]
-src_path = {toxinidir}/src
-tst_path = {toxinidir}/tests
-all_path = {[vars]src_path} {[vars]tst_path}
-
-[testenv]
-basepython = python3.8
-setenv =
- PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path}
- PYTHONBREAKPOINT=ipdb.set_trace
- PY_COLORS=1
-passenv =
- PYTHONPATH
- CHARM_BUILD_DIR
- MODEL_SETTINGS
-
-[testenv:fmt]
-description = Apply coding style standards to code
-deps =
- black
- isort
-commands =
- isort {[vars]all_path}
- black {[vars]all_path}
-
-[testenv:lint]
-description = Check code against coding style standards
-deps =
- black
- flake8
- flake8-docstrings
- flake8-builtins
- pyproject-flake8
- pep8-naming
- isort
- codespell
-commands =
- # uncomment the following line if this charm owns a lib
- codespell {toxinidir} --skip {toxinidir}/.git --skip {toxinidir}/.tox \
- --skip {toxinidir}/build --skip {toxinidir}/lib --skip {toxinidir}/venv \
- --skip {toxinidir}/.mypy_cache --skip {toxinidir}/icon.svg
- # pflake8 wrapper supports config from pyproject.toml
- pflake8 {[vars]all_path}
- isort --check-only --diff {[vars]all_path}
- black --check --diff {[vars]all_path}
-
-[testenv:unit]
-description = Run unit tests
-deps =
- pytest
- pytest-mock
- coverage[toml]
- -r{toxinidir}/requirements.txt
-commands =
- coverage run --source={[vars]src_path} \
- -m pytest {[vars]tst_path}/unit -v --tb native -s {posargs}
- coverage report
- coverage xml
-
-[testenv:integration]
-description = Run integration tests
-deps =
- juju<3.0.0
- pytest
- pytest-operator
- -r{toxinidir}/requirements.txt
-commands =
- pytest -v --tb native {[vars]tst_path}/integration --log-cli-level=INFO -s {posargs} --cloud microk8s
diff --git a/installers/charm/osm-pol/.gitignore b/installers/charm/osm-pol/.gitignore
deleted file mode 100644
index 87d0a587..00000000
--- a/installers/charm/osm-pol/.gitignore
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-venv/
-build/
-*.charm
-.tox/
-.coverage
-coverage.xml
-__pycache__/
-*.py[cod]
-.vscode
\ No newline at end of file
diff --git a/installers/charm/osm-pol/.jujuignore b/installers/charm/osm-pol/.jujuignore
deleted file mode 100644
index 17c7a8bb..00000000
--- a/installers/charm/osm-pol/.jujuignore
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-/venv
-*.py[cod]
-*.charm
diff --git a/installers/charm/osm-pol/CONTRIBUTING.md b/installers/charm/osm-pol/CONTRIBUTING.md
deleted file mode 100644
index 4bbbeeae..00000000
--- a/installers/charm/osm-pol/CONTRIBUTING.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-# Contributing
-
-## Overview
-
-This document explains the processes and practices recommended for contributing enhancements to
-this operator.
-
-- Generally, before developing enhancements to this charm, you should consider [opening an issue
- ](https://osm.etsi.org/bugzilla/enter_bug.cgi?product=OSM) explaining your use case. (Component=devops, version=master)
-- If you would like to chat with us about your use-cases or proposed implementation, you can reach
- us at [OSM Juju public channel](https://opensourcemano.slack.com/archives/C027KJGPECA).
-- Familiarising yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) library
- will help you a lot when working on new features or bug fixes.
-- All enhancements require review before being merged. Code review typically examines
- - code quality
- - test coverage
-  - user experience for Juju administrators of this charm.
-- Please help us keep branches easy to review by rebasing your Gerrit patch onto
-  the `master` branch.
-
-## Developing
-
-You can use the environments created by `tox` for development:
-
-```shell
-tox --notest -e unit
-source .tox/unit/bin/activate
-```
-
-### Testing
-
-```shell
-tox -e fmt # update your code according to linting rules
-tox -e lint # code style
-tox -e unit # unit tests
-tox -e integration # integration tests
-tox # runs 'lint' and 'unit' environments
-```
-
-## Build charm
-
-Build the charm in this git repository using:
-
-```shell
-charmcraft pack
-```
-
-### Deploy
-
-```bash
-# Create a model
-juju add-model dev
-# Enable DEBUG logging
-juju model-config logging-config="=INFO;unit=DEBUG"
-# Deploy the charm
-juju deploy ./osm-pol_ubuntu-22.04-amd64.charm \
- --resource pol-image=opensourcemano/pol:testing-daily --series jammy
-```
diff --git a/installers/charm/osm-pol/LICENSE b/installers/charm/osm-pol/LICENSE
deleted file mode 100644
index 7e9d5046..00000000
--- a/installers/charm/osm-pol/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2022 Canonical Ltd.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/installers/charm/osm-pol/README.md b/installers/charm/osm-pol/README.md
deleted file mode 100644
index cd96c755..00000000
--- a/installers/charm/osm-pol/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-
-
-# OSM POL
-
-Charmhub package name: osm-pol
-More information: https://charmhub.io/osm-pol
-
-## Other resources
-
-* [Read more](https://osm.etsi.org/docs/user-guide/latest/)
-
-* [Contributing](https://osm.etsi.org/gitweb/?p=osm/devops.git;a=blob;f=installers/charm/osm-pol/CONTRIBUTING.md)
-
-* See the [Juju SDK documentation](https://juju.is/docs/sdk) for more information about developing and improving charms.
-
diff --git a/installers/charm/osm-pol/actions.yaml b/installers/charm/osm-pol/actions.yaml
deleted file mode 100644
index 0d73468f..00000000
--- a/installers/charm/osm-pol/actions.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Actions tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-get-debug-mode-information:
- description: Get information to debug the container
diff --git a/installers/charm/osm-pol/charmcraft.yaml b/installers/charm/osm-pol/charmcraft.yaml
deleted file mode 100644
index f5e3ff37..00000000
--- a/installers/charm/osm-pol/charmcraft.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-
-type: charm
-bases:
- - build-on:
- - name: "ubuntu"
- channel: "22.04"
- run-on:
- - name: "ubuntu"
- channel: "22.04"
-
-parts:
- charm:
- # build-packages:
- # - git
- prime:
- - files/*
diff --git a/installers/charm/osm-pol/config.yaml b/installers/charm/osm-pol/config.yaml
deleted file mode 100644
index a92100d0..00000000
--- a/installers/charm/osm-pol/config.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Configure tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-options:
- log-level:
- default: "INFO"
- description: |
- Set the Logging Level.
-
- Options:
- - TRACE
- - DEBUG
- - INFO
- - WARN
- - ERROR
- - FATAL
- type: string
- mysql-uri:
- type: string
- description: |
-      MySQL URI with the following format:
-        mysql://<user>:<password>@<host>:<port>/<database>
-
- This should be removed after the mysql-integrator charm is made.
-
- If provided, this config will override the mysql relation.
-
- # Debug-mode options
- debug-mode:
- type: boolean
- description: |
- Great for OSM Developers! (Not recommended for production deployments)
-
-      This option activates the Debug Mode, which sets up the container to be ready for debugging.
- As part of the setup, SSH is enabled and a VSCode workspace file is automatically populated.
-
- After enabling the debug-mode, execute the following command to get the information you need
- to start debugging:
- `juju run-action get-debug-mode-information --wait`
-
- The previous command returns the command you need to execute, and the SSH password that was set.
-
- See also:
- - https://charmhub.io/osm-pol/configure#pol-hostpath
- - https://charmhub.io/osm-pol/configure#common-hostpath
- default: false
-
- pol-hostpath:
- type: string
- description: |
- Set this config to the local path of the POL module to persist the changes done during the
- debug-mode session.
-
- Example:
- $ git clone "https://osm.etsi.org/gerrit/osm/POL" /home/ubuntu/POL
- $ juju config pol pol-hostpath=/home/ubuntu/POL
-
- This configuration only applies if option `debug-mode` is set to true.
-
- common-hostpath:
- type: string
- description: |
- Set this config to the local path of the common module to persist the changes done during the
- debug-mode session.
-
- Example:
- $ git clone "https://osm.etsi.org/gerrit/osm/common" /home/ubuntu/common
- $ juju config pol common-hostpath=/home/ubuntu/common
-
- This configuration only applies if option `debug-mode` is set to true.
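
The config.yaml above only declares the options. Below is a minimal sketch of how a charm built with the ops framework could consume them; the actual osm-pol `src/charm.py` is not part of this hunk, and the class and constant names are hypothetical.

```python
# Minimal sketch, assuming the options declared in config.yaml above.
# Not the actual osm-pol charm; class and constant names are illustrative.
from ops.charm import CharmBase
from ops.main import main
from ops.model import ActiveStatus, BlockedStatus

VALID_LOG_LEVELS = {"TRACE", "DEBUG", "INFO", "WARN", "ERROR", "FATAL"}


class PolCharmSketch(CharmBase):
    """Illustrative consumer of the log-level and mysql-uri options."""

    def __init__(self, *args):
        super().__init__(*args)
        self.framework.observe(self.on.config_changed, self._on_config_changed)

    def _on_config_changed(self, _event) -> None:
        log_level = self.config.get("log-level", "INFO")
        if log_level not in VALID_LOG_LEVELS:
            self.unit.status = BlockedStatus(f"invalid log-level: {log_level}")
            return
        # If set, mysql-uri overrides the mysql relation (see description above).
        mysql_uri = self.config.get("mysql-uri")
        if mysql_uri:
            # ...render the POL service environment with this URI...
            pass
        self.unit.status = ActiveStatus()


if __name__ == "__main__":
    main(PolCharmSketch)
```
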
diff --git a/installers/charm/osm-pol/files/vscode-workspace.json b/installers/charm/osm-pol/files/vscode-workspace.json
deleted file mode 100644
index 36e7c4db..00000000
--- a/installers/charm/osm-pol/files/vscode-workspace.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "folders": [
- {"path": "/usr/lib/python3/dist-packages/osm_policy_module"},
- {"path": "/usr/lib/python3/dist-packages/osm_common"},
- ],
- "settings": {},
- "launch": {
- "version": "0.2.0",
- "configurations": [
- {
- "name": "POL",
- "type": "python",
- "request": "launch",
- "module": "osm_policy_module.cmd.policy_module_agent",
- "justMyCode": false,
- }
- ]
- }
-}
\ No newline at end of file
diff --git a/installers/charm/osm-pol/lib/charms/data_platform_libs/v0/data_interfaces.py b/installers/charm/osm-pol/lib/charms/data_platform_libs/v0/data_interfaces.py
deleted file mode 100644
index b3da5aa4..00000000
--- a/installers/charm/osm-pol/lib/charms/data_platform_libs/v0/data_interfaces.py
+++ /dev/null
@@ -1,1130 +0,0 @@
-# Copyright 2023 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Library to manage the relation for the data-platform products.
-
-This library contains the Requires and Provides classes for handling the relation
-between an application and multiple managed application supported by the data-team:
-MySQL, PostgreSQL, MongoDB, Redis, and Kafka.
-
-### Database (MySQL, Postgresql, MongoDB, and Redis)
-
-#### Requires Charm
-This library is a uniform interface to a selection of common database
-metadata, with added custom events that add convenience to database management,
-and methods to consume the application related data.
-
-
-Following an example of using the DatabaseCreatedEvent, in the context of the
-application charm code:
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- DatabaseCreatedEvent,
- DatabaseRequires,
-)
-
-class ApplicationCharm(CharmBase):
- # Application charm that connects to database charms.
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Charm events defined in the database requires charm library.
- self.database = DatabaseRequires(self, relation_name="database", database_name="database")
- self.framework.observe(self.database.on.database_created, self._on_database_created)
-
- def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
-
- # Start application with rendered configuration
- self._start_application(config_file)
-
- # Set active status
- self.unit.status = ActiveStatus("received database credentials")
-```
-
-As shown above, the library provides some custom events to handle specific situations,
-which are listed below:
-
-- database_created: event emitted when the requested database is created.
-- endpoints_changed: event emitted when the read/write endpoints of the database have changed.
-- read_only_endpoints_changed: event emitted when the read-only endpoints of the database
- have changed. Event is not triggered if read/write endpoints changed too.
-
-If it is necessary to connect multiple database clusters to the same relation endpoint,
-the application charm can implement the same code as if it were connecting to only
-one database cluster (like the code example above).
-
-To differentiate multiple clusters connected to the same relation endpoint
-the application charm can use the name of the remote application:
-
-```python
-
-def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Get the remote app name of the cluster that triggered this event
- cluster = event.relation.app.name
-```
-
-It is also possible to provide an alias for each different database cluster/relation.
-
-So, it is possible to differentiate the clusters in two ways.
-The first is to use the remote application name, i.e., `event.relation.app.name`, as above.
-
-The second way is to use a different event handler for each cluster's events.
-The implementation would be something like the following code:
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- DatabaseCreatedEvent,
- DatabaseRequires,
-)
-
-class ApplicationCharm(CharmBase):
- # Application charm that connects to database charms.
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Define the cluster aliases and one handler for each cluster database created event.
- self.database = DatabaseRequires(
- self,
- relation_name="database",
- database_name="database",
- relations_aliases = ["cluster1", "cluster2"],
- )
- self.framework.observe(
- self.database.on.cluster1_database_created, self._on_cluster1_database_created
- )
- self.framework.observe(
- self.database.on.cluster2_database_created, self._on_cluster2_database_created
- )
-
- def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database on the cluster named cluster1
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
- ...
-
- def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database on the cluster named cluster2
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
- ...
-
-```
-
-### Provider Charm
-
-Following an example of using the DatabaseRequestedEvent, in the context of the
-database charm code:
-
-```python
-from charms.data_platform_libs.v0.data_interfaces import DatabaseProvides
-
-class SampleCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- # Charm events defined in the database provides charm library.
- self.provided_database = DatabaseProvides(self, relation_name="database")
- self.framework.observe(self.provided_database.on.database_requested,
- self._on_database_requested)
- # Database generic helper
- self.database = DatabaseHelper()
-
- def _on_database_requested(self, event: DatabaseRequestedEvent) -> None:
- # Handle the event triggered by a new database requested in the relation
- # Retrieve the database name using the charm library.
- db_name = event.database
- # generate a new user credential
- username = self.database.generate_user()
- password = self.database.generate_password()
- # set the credentials for the relation
- self.provided_database.set_credentials(event.relation.id, username, password)
- # set other variables for the relation event.set_tls("False")
-```
-As shown above, the library provides a custom event (database_requested) to handle
-the situation when an application charm requests a new database to be created.
-It is preferred to subscribe to this event instead of the relation-changed event, to avoid
-creating a new database when information other than a database name is
-exchanged in the relation databag.
-
-### Kafka
-
-This library is the interface to use and interact with the Kafka charm. This library contains
-custom events that add convenience to manage Kafka, and provides methods to consume the
-application related data.
-
-#### Requirer Charm
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- BootstrapServerChangedEvent,
- KafkaRequires,
- TopicCreatedEvent,
-)
-
-class ApplicationCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- self.kafka = KafkaRequires(self, "kafka_client", "test-topic")
- self.framework.observe(
- self.kafka.on.bootstrap_server_changed, self._on_kafka_bootstrap_server_changed
- )
- self.framework.observe(
- self.kafka.on.topic_created, self._on_kafka_topic_created
- )
-
- def _on_kafka_bootstrap_server_changed(self, event: BootstrapServerChangedEvent):
- # Event triggered when a bootstrap server was changed for this application
-
- new_bootstrap_server = event.bootstrap_server
- ...
-
- def _on_kafka_topic_created(self, event: TopicCreatedEvent):
- # Event triggered when a topic was created for this application
- username = event.username
- password = event.password
- tls = event.tls
- tls_ca= event.tls_ca
-        bootstrap_server = event.bootstrap_server
-        consumer_group_prefix = event.consumer_group_prefix
- zookeeper_uris = event.zookeeper_uris
- ...
-
-```
-
-As shown above, the library provides some custom events to handle specific situations,
-which are listed below:
-
-- topic_created: event emitted when the requested topic is created.
-- bootstrap_server_changed: event emitted when the bootstrap server has changed.
-- credential_changed: event emitted when the credentials of Kafka changed.
-
-### Provider Charm
-
-Following the previous example, this is an example of the provider charm.
-
-```python
-from charms.data_platform_libs.v0.data_interfaces import (
-    KafkaProvides,
-    TopicRequestedEvent,
-)
-
-class SampleCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Default charm events.
- self.framework.observe(self.on.start, self._on_start)
-
- # Charm events defined in the Kafka Provides charm library.
- self.kafka_provider = KafkaProvides(self, relation_name="kafka_client")
- self.framework.observe(self.kafka_provider.on.topic_requested, self._on_topic_requested)
- # Kafka generic helper
- self.kafka = KafkaHelper()
-
- def _on_topic_requested(self, event: TopicRequestedEvent):
- # Handle the on_topic_requested event.
-
- topic = event.topic
- relation_id = event.relation.id
- # set connection info in the databag relation
- self.kafka_provider.set_bootstrap_server(relation_id, self.kafka.get_bootstrap_server())
- self.kafka_provider.set_credentials(relation_id, username=username, password=password)
- self.kafka_provider.set_consumer_group_prefix(relation_id, ...)
- self.kafka_provider.set_tls(relation_id, "False")
- self.kafka_provider.set_zookeeper_uris(relation_id, ...)
-
-```
-As shown above, the library provides a custom event (topic_requested) to handle
-the situation when an application charm requests a new topic to be created.
-It is preferred to subscribe to this event instead of the relation-changed event, to avoid
-creating a new topic when information other than a topic name is
-exchanged in the relation databag.
-"""
-
-import json
-import logging
-from abc import ABC, abstractmethod
-from collections import namedtuple
-from datetime import datetime
-from typing import List, Optional
-
-from ops.charm import (
- CharmBase,
- CharmEvents,
- RelationChangedEvent,
- RelationEvent,
- RelationJoinedEvent,
-)
-from ops.framework import EventSource, Object
-from ops.model import Relation
-
-# The unique Charmhub library identifier, never change it
-LIBID = "6c3e6b6680d64e9c89e611d1a15f65be"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 7
-
-PYDEPS = ["ops>=2.0.0"]
-
-logger = logging.getLogger(__name__)
-
-Diff = namedtuple("Diff", "added changed deleted")
-Diff.__doc__ = """
-A tuple for storing the diff between two data mappings.
-
-added - keys that were added
-changed - keys that still exist but have new values
-deleted - keys that were deleted"""
-
-
-def diff(event: RelationChangedEvent, bucket: str) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
- bucket: bucket of the databag (app or unit)
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- # Retrieve the old data from the data key in the application relation databag.
- old_data = json.loads(event.relation.data[bucket].get("data", "{}"))
- # Retrieve the new data from the event relation databag.
- new_data = {
- key: value for key, value in event.relation.data[event.app].items() if key != "data"
- }
-
- # These are the keys that were added to the databag and triggered this event.
- added = new_data.keys() - old_data.keys()
- # These are the keys that were removed from the databag and triggered this event.
- deleted = old_data.keys() - new_data.keys()
- # These are the keys that already existed in the databag,
- # but had their values changed.
- changed = {key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key]}
- # Convert the new_data to a serializable format and save it for a next diff check.
- event.relation.data[bucket].update({"data": json.dumps(new_data)})
-
- # Return the diff with all possible changes.
- return Diff(added, changed, deleted)
-
-
-# Base DataProvides and DataRequires
-
-
-class DataProvides(Object, ABC):
- """Base provides-side of the data products relation."""
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
- self.charm = charm
- self.local_app = self.charm.model.app
- self.local_unit = self.charm.unit
- self.relation_name = relation_name
- self.framework.observe(
- charm.on[relation_name].relation_changed,
- self._on_relation_changed,
- )
-
- def _diff(self, event: RelationChangedEvent) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- return diff(event, self.local_app)
-
- @abstractmethod
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation data has changed."""
- raise NotImplementedError
-
- def fetch_relation_data(self) -> dict:
- """Retrieves data from relation.
-
- This function can be used to retrieve data from a relation
- in the charm code when outside an event callback.
-
- Returns:
- a dict of the values stored in the relation data bag
- for all relation instances (indexed by the relation id).
- """
- data = {}
- for relation in self.relations:
- data[relation.id] = {
- key: value for key, value in relation.data[relation.app].items() if key != "data"
- }
- return data
-
- def _update_relation_data(self, relation_id: int, data: dict) -> None:
- """Updates a set of key-value pairs in the relation.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- data: dict containing the key-value pairs
- that should be updated in the relation.
- """
- if self.local_unit.is_leader():
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_app].update(data)
-
- @property
- def relations(self) -> List[Relation]:
- """The list of Relation instances associated with this relation_name."""
- return list(self.charm.model.relations[self.relation_name])
-
- def set_credentials(self, relation_id: int, username: str, password: str) -> None:
- """Set credentials.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- username: user that was created.
- password: password of the created user.
- """
- self._update_relation_data(
- relation_id,
- {
- "username": username,
- "password": password,
- },
- )
-
- def set_tls(self, relation_id: int, tls: str) -> None:
- """Set whether TLS is enabled.
-
- Args:
- relation_id: the identifier for a particular relation.
- tls: whether tls is enabled (True or False).
- """
- self._update_relation_data(relation_id, {"tls": tls})
-
- def set_tls_ca(self, relation_id: int, tls_ca: str) -> None:
- """Set the TLS CA in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- tls_ca: TLS certification authority.
- """
- self._update_relation_data(relation_id, {"tls_ca": tls_ca})
-
-
-class DataRequires(Object, ABC):
- """Requires-side of the relation."""
-
- def __init__(
- self,
- charm,
- relation_name: str,
- extra_user_roles: str = None,
- ):
- """Manager of base client relations."""
- super().__init__(charm, relation_name)
- self.charm = charm
- self.extra_user_roles = extra_user_roles
- self.local_app = self.charm.model.app
- self.local_unit = self.charm.unit
- self.relation_name = relation_name
- self.framework.observe(
- self.charm.on[relation_name].relation_joined, self._on_relation_joined_event
- )
- self.framework.observe(
- self.charm.on[relation_name].relation_changed, self._on_relation_changed_event
- )
-
- @abstractmethod
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the relation."""
- raise NotImplementedError
-
- @abstractmethod
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- raise NotImplementedError
-
- def fetch_relation_data(self) -> dict:
- """Retrieves data from relation.
-
- This function can be used to retrieve data from a relation
- in the charm code when outside an event callback.
- Function cannot be used in `*-relation-broken` events and will raise an exception.
-
- Returns:
- a dict of the values stored in the relation data bag
- for all relation instances (indexed by the relation ID).
- """
- data = {}
- for relation in self.relations:
- data[relation.id] = {
- key: value for key, value in relation.data[relation.app].items() if key != "data"
- }
- return data
-
- def _update_relation_data(self, relation_id: int, data: dict) -> None:
- """Updates a set of key-value pairs in the relation.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- data: dict containing the key-value pairs
- that should be updated in the relation.
- """
- if self.local_unit.is_leader():
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_app].update(data)
-
- def _diff(self, event: RelationChangedEvent) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- return diff(event, self.local_unit)
-
- @property
- def relations(self) -> List[Relation]:
- """The list of Relation instances associated with this relation_name."""
- return [
- relation
- for relation in self.charm.model.relations[self.relation_name]
- if self._is_relation_active(relation)
- ]
-
- @staticmethod
- def _is_relation_active(relation: Relation):
- try:
- _ = repr(relation.data)
- return True
- except RuntimeError:
- return False
-
- @staticmethod
- def _is_resource_created_for_relation(relation: Relation):
- return (
- "username" in relation.data[relation.app] and "password" in relation.data[relation.app]
- )
-
- def is_resource_created(self, relation_id: Optional[int] = None) -> bool:
- """Check if the resource has been created.
-
- This function can be used to check if the Provider answered with data in the charm code
- when outside an event callback.
-
- Args:
- relation_id (int, optional): When provided the check is done only for the relation id
- provided, otherwise the check is done for all relations
-
- Returns:
- True or False
-
- Raises:
- IndexError: If relation_id is provided but that relation does not exist
- """
- if relation_id is not None:
- try:
- relation = [relation for relation in self.relations if relation.id == relation_id][
- 0
- ]
- return self._is_resource_created_for_relation(relation)
- except IndexError:
- raise IndexError(f"relation id {relation_id} cannot be accessed")
- else:
- return (
- all(
- [
- self._is_resource_created_for_relation(relation)
- for relation in self.relations
- ]
- )
- if self.relations
- else False
- )
-
-
-# General events
-
-
-class ExtraRoleEvent(RelationEvent):
- """Base class for data events."""
-
- @property
- def extra_user_roles(self) -> Optional[str]:
- """Returns the extra user roles that were requested."""
- return self.relation.data[self.relation.app].get("extra-user-roles")
-
-
-class AuthenticationEvent(RelationEvent):
- """Base class for authentication fields for events."""
-
- @property
- def username(self) -> Optional[str]:
- """Returns the created username."""
- return self.relation.data[self.relation.app].get("username")
-
- @property
- def password(self) -> Optional[str]:
- """Returns the password for the created user."""
- return self.relation.data[self.relation.app].get("password")
-
- @property
- def tls(self) -> Optional[str]:
- """Returns whether TLS is configured."""
- return self.relation.data[self.relation.app].get("tls")
-
- @property
- def tls_ca(self) -> Optional[str]:
- """Returns TLS CA."""
- return self.relation.data[self.relation.app].get("tls-ca")
-
-
-# Database related events and fields
-
-
-class DatabaseProvidesEvent(RelationEvent):
- """Base class for database events."""
-
- @property
- def database(self) -> Optional[str]:
- """Returns the database that was requested."""
- return self.relation.data[self.relation.app].get("database")
-
-
-class DatabaseRequestedEvent(DatabaseProvidesEvent, ExtraRoleEvent):
- """Event emitted when a new database is requested for use on this relation."""
-
-
-class DatabaseProvidesEvents(CharmEvents):
- """Database events.
-
- This class defines the events that the database can emit.
- """
-
- database_requested = EventSource(DatabaseRequestedEvent)
-
-
-class DatabaseRequiresEvent(RelationEvent):
- """Base class for database events."""
-
- @property
- def endpoints(self) -> Optional[str]:
- """Returns a comma separated list of read/write endpoints."""
- return self.relation.data[self.relation.app].get("endpoints")
-
- @property
- def read_only_endpoints(self) -> Optional[str]:
- """Returns a comma separated list of read only endpoints."""
- return self.relation.data[self.relation.app].get("read-only-endpoints")
-
- @property
- def replset(self) -> Optional[str]:
- """Returns the replicaset name.
-
- MongoDB only.
- """
- return self.relation.data[self.relation.app].get("replset")
-
- @property
- def uris(self) -> Optional[str]:
- """Returns the connection URIs.
-
- MongoDB, Redis, OpenSearch.
- """
- return self.relation.data[self.relation.app].get("uris")
-
- @property
- def version(self) -> Optional[str]:
- """Returns the version of the database.
-
- Version as informed by the database daemon.
- """
- return self.relation.data[self.relation.app].get("version")
-
-
-class DatabaseCreatedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when a new database is created for use on this relation."""
-
-
-class DatabaseEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when the read/write endpoints are changed."""
-
-
-class DatabaseReadOnlyEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when the read only endpoints are changed."""
-
-
-class DatabaseRequiresEvents(CharmEvents):
- """Database events.
-
- This class defines the events that the database can emit.
- """
-
- database_created = EventSource(DatabaseCreatedEvent)
- endpoints_changed = EventSource(DatabaseEndpointsChangedEvent)
- read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent)
-
-
-# Database Provider and Requires
-
-
-class DatabaseProvides(DataProvides):
- """Provider-side of the database relations."""
-
- on = DatabaseProvidesEvents()
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
-
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation has changed."""
- # Only the leader should handle this event.
- if not self.local_unit.is_leader():
- return
-
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Emit a database requested event if the setup key (database name and optional
- # extra user roles) was added to the relation databag by the application.
- if "database" in diff.added:
- self.on.database_requested.emit(event.relation, app=event.app, unit=event.unit)
-
- def set_endpoints(self, relation_id: int, connection_strings: str) -> None:
- """Set database primary connections.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- connection_strings: database hosts and ports comma separated list.
- """
- self._update_relation_data(relation_id, {"endpoints": connection_strings})
-
- def set_read_only_endpoints(self, relation_id: int, connection_strings: str) -> None:
- """Set database replicas connection strings.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- connection_strings: database hosts and ports comma separated list.
- """
- self._update_relation_data(relation_id, {"read-only-endpoints": connection_strings})
-
- def set_replset(self, relation_id: int, replset: str) -> None:
- """Set replica set name in the application relation databag.
-
- MongoDB only.
-
- Args:
- relation_id: the identifier for a particular relation.
- replset: replica set name.
- """
- self._update_relation_data(relation_id, {"replset": replset})
-
- def set_uris(self, relation_id: int, uris: str) -> None:
- """Set the database connection URIs in the application relation databag.
-
- MongoDB, Redis, and OpenSearch only.
-
- Args:
- relation_id: the identifier for a particular relation.
- uris: connection URIs.
- """
- self._update_relation_data(relation_id, {"uris": uris})
-
- def set_version(self, relation_id: int, version: str) -> None:
- """Set the database version in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- version: database version.
- """
- self._update_relation_data(relation_id, {"version": version})
-
-
-class DatabaseRequires(DataRequires):
- """Requires-side of the database relation."""
-
- on = DatabaseRequiresEvents()
-
- def __init__(
- self,
- charm,
- relation_name: str,
- database_name: str,
- extra_user_roles: str = None,
- relations_aliases: List[str] = None,
- ):
- """Manager of database client relations."""
- super().__init__(charm, relation_name, extra_user_roles)
- self.database = database_name
- self.relations_aliases = relations_aliases
-
- # Define custom event names for each alias.
- if relations_aliases:
- # Ensure the number of aliases does not exceed the maximum
- # of connections allowed in the specific relation.
- relation_connection_limit = self.charm.meta.requires[relation_name].limit
- if len(relations_aliases) != relation_connection_limit:
- raise ValueError(
- f"The number of aliases must match the maximum number of connections allowed in the relation. "
- f"Expected {relation_connection_limit}, got {len(relations_aliases)}"
- )
-
- for relation_alias in relations_aliases:
- self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent)
- self.on.define_event(
- f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent
- )
- self.on.define_event(
- f"{relation_alias}_read_only_endpoints_changed",
- DatabaseReadOnlyEndpointsChangedEvent,
- )
-
- def _assign_relation_alias(self, relation_id: int) -> None:
- """Assigns an alias to a relation.
-
- This function writes in the unit data bag.
-
- Args:
- relation_id: the identifier for a particular relation.
- """
- # If no aliases were provided, return immediately.
- if not self.relations_aliases:
- return
-
- # Return if an alias was already assigned to this relation
- # (like when there are more than one unit joining the relation).
- if (
- self.charm.model.get_relation(self.relation_name, relation_id)
- .data[self.local_unit]
- .get("alias")
- ):
- return
-
- # Retrieve the available aliases (the ones that weren't assigned to any relation).
- available_aliases = self.relations_aliases[:]
- for relation in self.charm.model.relations[self.relation_name]:
- alias = relation.data[self.local_unit].get("alias")
- if alias:
- logger.debug("Alias %s was already assigned to relation %d", alias, relation.id)
- available_aliases.remove(alias)
-
- # Set the alias in the unit relation databag of the specific relation.
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_unit].update({"alias": available_aliases[0]})
-
- def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None:
- """Emit an aliased event to a particular relation if it has an alias.
-
- Args:
- event: the relation changed event that was received.
- event_name: the name of the event to emit.
- """
- alias = self._get_relation_alias(event.relation.id)
- if alias:
- getattr(self.on, f"{alias}_{event_name}").emit(
- event.relation, app=event.app, unit=event.unit
- )
-
- def _get_relation_alias(self, relation_id: int) -> Optional[str]:
- """Returns the relation alias.
-
- Args:
- relation_id: the identifier for a particular relation.
-
- Returns:
- the relation alias or None if the relation was not found.
- """
- for relation in self.charm.model.relations[self.relation_name]:
- if relation.id == relation_id:
- return relation.data[self.local_unit].get("alias")
- return None
-
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the database relation."""
- # If relations aliases were provided, assign one to the relation.
- self._assign_relation_alias(event.relation.id)
-
- # Sets both database and extra user roles in the relation
- # if the roles are provided. Otherwise, sets only the database.
- if self.extra_user_roles:
- self._update_relation_data(
- event.relation.id,
- {
- "database": self.database,
- "extra-user-roles": self.extra_user_roles,
- },
- )
- else:
- self._update_relation_data(event.relation.id, {"database": self.database})
-
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- """Event emitted when the database relation has changed."""
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Check if the database is created
- # (the database charm shared the credentials).
- if "username" in diff.added and "password" in diff.added:
- # Emit the default event (the one without an alias).
- logger.info("database created at %s", datetime.now())
- self.on.database_created.emit(event.relation, app=event.app, unit=event.unit)
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "database_created")
-
-            # To avoid unnecessary application restarts, do not trigger
-            # the 'endpoints_changed' event if 'database_created' is triggered.
- return
-
- # Emit an endpoints changed event if the database
- # added or changed this info in the relation databag.
- if "endpoints" in diff.added or "endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("endpoints changed on %s", datetime.now())
- self.on.endpoints_changed.emit(event.relation, app=event.app, unit=event.unit)
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "endpoints_changed")
-
-            # To avoid unnecessary application restarts, do not trigger
-            # the 'read_only_endpoints_changed' event if 'endpoints_changed' is triggered.
- return
-
- # Emit a read only endpoints changed event if the database
- # added or changed this info in the relation databag.
- if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("read-only-endpoints changed on %s", datetime.now())
- self.on.read_only_endpoints_changed.emit(
- event.relation, app=event.app, unit=event.unit
- )
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "read_only_endpoints_changed")
-
-
-# Kafka related events
-
-
-class KafkaProvidesEvent(RelationEvent):
- """Base class for Kafka events."""
-
- @property
- def topic(self) -> Optional[str]:
- """Returns the topic that was requested."""
- return self.relation.data[self.relation.app].get("topic")
-
-
-class TopicRequestedEvent(KafkaProvidesEvent, ExtraRoleEvent):
- """Event emitted when a new topic is requested for use on this relation."""
-
-
-class KafkaProvidesEvents(CharmEvents):
- """Kafka events.
-
-    This class defines the events that the Kafka charm can emit.
- """
-
- topic_requested = EventSource(TopicRequestedEvent)
-
-
-class KafkaRequiresEvent(RelationEvent):
- """Base class for Kafka events."""
-
- @property
- def bootstrap_server(self) -> Optional[str]:
- """Returns a a comma-seperated list of broker uris."""
- return self.relation.data[self.relation.app].get("endpoints")
-
- @property
- def consumer_group_prefix(self) -> Optional[str]:
- """Returns the consumer-group-prefix."""
- return self.relation.data[self.relation.app].get("consumer-group-prefix")
-
- @property
- def zookeeper_uris(self) -> Optional[str]:
- """Returns a comma separated list of Zookeeper uris."""
- return self.relation.data[self.relation.app].get("zookeeper-uris")
-
-
-class TopicCreatedEvent(AuthenticationEvent, KafkaRequiresEvent):
- """Event emitted when a new topic is created for use on this relation."""
-
-
-class BootstrapServerChangedEvent(AuthenticationEvent, KafkaRequiresEvent):
- """Event emitted when the bootstrap server is changed."""
-
-
-class KafkaRequiresEvents(CharmEvents):
- """Kafka events.
-
-    This class defines the events that the Kafka charm can emit.
- """
-
- topic_created = EventSource(TopicCreatedEvent)
- bootstrap_server_changed = EventSource(BootstrapServerChangedEvent)
-
-
-# Kafka Provides and Requires
-
-
-class KafkaProvides(DataProvides):
- """Provider-side of the Kafka relation."""
-
- on = KafkaProvidesEvents()
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
-
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation has changed."""
- # Only the leader should handle this event.
- if not self.local_unit.is_leader():
- return
-
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Emit a topic requested event if the setup key (topic name and optional
- # extra user roles) was added to the relation databag by the application.
- if "topic" in diff.added:
- self.on.topic_requested.emit(event.relation, app=event.app, unit=event.unit)
-
- def set_bootstrap_server(self, relation_id: int, bootstrap_server: str) -> None:
- """Set the bootstrap server in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- bootstrap_server: the bootstrap server address.
- """
- self._update_relation_data(relation_id, {"endpoints": bootstrap_server})
-
- def set_consumer_group_prefix(self, relation_id: int, consumer_group_prefix: str) -> None:
- """Set the consumer group prefix in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- consumer_group_prefix: the consumer group prefix string.
- """
- self._update_relation_data(relation_id, {"consumer-group-prefix": consumer_group_prefix})
-
- def set_zookeeper_uris(self, relation_id: int, zookeeper_uris: str) -> None:
- """Set the zookeeper uris in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
-            zookeeper_uris: comma-separated list of ZooKeeper server URIs.
- """
- self._update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris})
-
-
-class KafkaRequires(DataRequires):
- """Requires-side of the Kafka relation."""
-
- on = KafkaRequiresEvents()
-
- def __init__(self, charm, relation_name: str, topic: str, extra_user_roles: str = None):
- """Manager of Kafka client relations."""
- # super().__init__(charm, relation_name)
- super().__init__(charm, relation_name, extra_user_roles)
- self.charm = charm
- self.topic = topic
-
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the Kafka relation."""
- # Sets both topic and extra user roles in the relation
- # if the roles are provided. Otherwise, sets only the topic.
- self._update_relation_data(
- event.relation.id,
- {
- "topic": self.topic,
- "extra-user-roles": self.extra_user_roles,
- }
- if self.extra_user_roles is not None
- else {"topic": self.topic},
- )
-
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- """Event emitted when the Kafka relation has changed."""
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Check if the topic is created
- # (the Kafka charm shared the credentials).
- if "username" in diff.added and "password" in diff.added:
- # Emit the default event (the one without an alias).
- logger.info("topic created at %s", datetime.now())
- self.on.topic_created.emit(event.relation, app=event.app, unit=event.unit)
-
-            # To avoid unnecessary application restarts, do not trigger
-            # the 'endpoints_changed' event if 'topic_created' is triggered.
- return
-
-        # Emit an endpoints (bootstrap-server) changed event if the Kafka charm
- # added or changed this info in the relation databag.
- if "endpoints" in diff.added or "endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("endpoints changed on %s", datetime.now())
- self.on.bootstrap_server_changed.emit(
- event.relation, app=event.app, unit=event.unit
- ) # here check if this is the right design
- return
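
The docstrings above cover the generic usage of this library. The following is a hedged sketch of how a requirer charm such as osm-pol could wire `DatabaseRequires` to a MySQL provider; the relation name "mysql" and database name "pol" are assumptions, and the real osm-pol charm code is not shown in this hunk.

```python
# Sketch only: wires the DatabaseRequires class from the library above to a
# MySQL provider. Relation name "mysql" and database name "pol" are assumptions.
from charms.data_platform_libs.v0.data_interfaces import (
    DatabaseCreatedEvent,
    DatabaseRequires,
)
from ops.charm import CharmBase
from ops.model import ActiveStatus, WaitingStatus


class PolWithMysqlSketch(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        self.mysql = DatabaseRequires(self, relation_name="mysql", database_name="pol")
        self.framework.observe(self.mysql.on.database_created, self._on_database_created)
        self.framework.observe(self.on.update_status, self._on_update_status)

    def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
        # Credentials and endpoints arrive through the provider application databag.
        uri = f"mysql://{event.username}:{event.password}@{event.endpoints}/pol"
        # ...pass `uri` to the POL service configuration...
        self.unit.status = ActiveStatus()

    def _on_update_status(self, _event) -> None:
        if not self.mysql.is_resource_created():
            self.unit.status = WaitingStatus("waiting for mysql credentials")
```
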
diff --git a/installers/charm/osm-pol/lib/charms/kafka_k8s/v0/kafka.py b/installers/charm/osm-pol/lib/charms/kafka_k8s/v0/kafka.py
deleted file mode 100644
index aeb5edcb..00000000
--- a/installers/charm/osm-pol/lib/charms/kafka_k8s/v0/kafka.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Kafka library.
-
-This [library](https://juju.is/docs/sdk/libraries) implements both sides of the
-`kafka` [interface](https://juju.is/docs/sdk/relations).
-
-The *provider* side of this interface is implemented by the
-[kafka-k8s Charmed Operator](https://charmhub.io/kafka-k8s).
-
-Any Charmed Operator that *requires* Kafka for providing its
-service should implement the *requirer* side of this interface.
-
-In a nutshell, using this library to implement a Charmed Operator *requiring*
-Kafka would look like:
-
-```
-$ charmcraft fetch-lib charms.kafka_k8s.v0.kafka
-```
-
-`metadata.yaml`:
-
-```
-requires:
- kafka:
- interface: kafka
- limit: 1
-```
-
-`src/charm.py`:
-
-```
-from charms.kafka_k8s.v0.kafka import KafkaEvents, KafkaRequires
-from ops.charm import CharmBase
-
-
-class MyCharm(CharmBase):
-
- on = KafkaEvents()
-
- def __init__(self, *args):
- super().__init__(*args)
- self.kafka = KafkaRequires(self)
- self.framework.observe(
- self.on.kafka_available,
- self._on_kafka_available,
- )
- self.framework.observe(
- self.on["kafka"].relation_broken,
- self._on_kafka_broken,
- )
-
- def _on_kafka_available(self, event):
- # Get Kafka host and port
- host: str = self.kafka.host
- port: int = self.kafka.port
- # host => "kafka-k8s"
- # port => 9092
-
- def _on_kafka_broken(self, event):
- # Stop service
- # ...
- self.unit.status = BlockedStatus("need kafka relation")
-```
-
-You can file bugs
-[here](https://github.com/charmed-osm/kafka-k8s-operator/issues)!
-"""
-
-from typing import Optional
-
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, Object
-
-# The unique Charmhub library identifier, never change it
-from ops.model import Relation
-
-LIBID = "eacc8c85082347c9aae740e0220b8376"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 4
-
-
-KAFKA_HOST_APP_KEY = "host"
-KAFKA_PORT_APP_KEY = "port"
-
-
-class _KafkaAvailableEvent(EventBase):
- """Event emitted when Kafka is available."""
-
-
-class KafkaEvents(CharmEvents):
- """Kafka events.
-
- This class defines the events that Kafka can emit.
-
- Events:
- kafka_available (_KafkaAvailableEvent)
- """
-
- kafka_available = EventSource(_KafkaAvailableEvent)
-
-
-class KafkaRequires(Object):
- """Requires-side of the Kafka relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "kafka") -> None:
- super().__init__(charm, endpoint_name)
- self.charm = charm
- self._endpoint_name = endpoint_name
-
- # Observe relation events
- event_observe_mapping = {
- charm.on[self._endpoint_name].relation_changed: self._on_relation_changed,
- }
- for event, observer in event_observe_mapping.items():
- self.framework.observe(event, observer)
-
- def _on_relation_changed(self, event) -> None:
- if event.relation.app and all(
- key in event.relation.data[event.relation.app]
- for key in (KAFKA_HOST_APP_KEY, KAFKA_PORT_APP_KEY)
- ):
- self.charm.on.kafka_available.emit()
-
- @property
- def host(self) -> str:
- """Get kafka hostname."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- relation.data[relation.app].get(KAFKA_HOST_APP_KEY)
- if relation and relation.app
- else None
- )
-
- @property
- def port(self) -> int:
- """Get kafka port number."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- int(relation.data[relation.app].get(KAFKA_PORT_APP_KEY))
- if relation and relation.app
- else None
- )
-
-
-class KafkaProvides(Object):
- """Provides-side of the Kafka relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "kafka") -> None:
- super().__init__(charm, endpoint_name)
- self._endpoint_name = endpoint_name
-
- def set_host_info(self, host: str, port: int, relation: Optional[Relation] = None) -> None:
- """Set Kafka host and port.
-
- This function writes in the application data of the relation, therefore,
- only the unit leader can call it.
-
- Args:
- host (str): Kafka hostname or IP address.
- port (int): Kafka port.
- relation (Optional[Relation]): Relation to update.
- If not specified, all relations will be updated.
-
- Raises:
- Exception: if a non-leader unit calls this function.
- """
- if not self.model.unit.is_leader():
- raise Exception("only the leader set host information.")
-
- if relation:
- self._update_relation_data(host, port, relation)
- return
-
- for relation in self.model.relations[self._endpoint_name]:
- self._update_relation_data(host, port, relation)
-
- def _update_relation_data(self, host: str, port: int, relation: Relation) -> None:
- """Update data in relation if needed."""
- relation.data[self.model.app][KAFKA_HOST_APP_KEY] = host
- relation.data[self.model.app][KAFKA_PORT_APP_KEY] = str(port)
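
The module docstring above demonstrates the requirer side of the `kafka` interface. For the provider side, here is a minimal sketch using the same library; the host/port values mirror the docstring example, and the class name is hypothetical.

```python
# Sketch only: provider side of the `kafka` interface using KafkaProvides above.
# The host/port values mirror the docstring example; the class name is made up.
from charms.kafka_k8s.v0.kafka import KafkaProvides
from ops.charm import CharmBase


class KafkaProviderSketch(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        self.kafka = KafkaProvides(self)
        self.framework.observe(
            self.on["kafka"].relation_joined, self._on_kafka_relation_joined
        )

    def _on_kafka_relation_joined(self, event) -> None:
        # Only the leader may write the application databag; set_host_info raises
        # an Exception otherwise (see the library code above).
        if self.unit.is_leader():
            self.kafka.set_host_info("kafka-k8s", 9092, event.relation)
```
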
diff --git a/installers/charm/osm-pol/lib/charms/osm_libs/v0/utils.py b/installers/charm/osm-pol/lib/charms/osm_libs/v0/utils.py
deleted file mode 100644
index d739ba68..00000000
--- a/installers/charm/osm-pol/lib/charms/osm_libs/v0/utils.py
+++ /dev/null
@@ -1,544 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-"""OSM Utils Library.
-
-This library offers some utilities made for but not limited to Charmed OSM.
-
-# Getting started
-
-Execute the following command inside your Charmed Operator folder to fetch the library.
-
-```shell
-charmcraft fetch-lib charms.osm_libs.v0.utils
-```
-
-# CharmError Exception
-
-An exception that takes two arguments, the message and the StatusBase class, which are useful
-to set the status of the charm when the exception is raised.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import CharmError
-
-class MyCharm(CharmBase):
- def _on_config_changed(self, _):
- try:
- if not self.config.get("some-option"):
- raise CharmError("need some-option", BlockedStatus)
-
- if not self.mysql_ready:
- raise CharmError("waiting for mysql", WaitingStatus)
-
- # Do stuff...
-
-        except CharmError as e:
- self.unit.status = e.status
-```
-
-# Pebble validations
-
-The `check_container_ready` function checks that a container is ready,
-and therefore Pebble is ready.
-
-The `check_service_active` function checks that a service in a container is running.
-
-Both functions raise a CharmError if the validations fail.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import check_container_ready, check_service_active
-
-class MyCharm(CharmBase):
- def _on_config_changed(self, _):
- try:
- container: Container = self.unit.get_container("my-container")
- check_container_ready(container)
- check_service_active(container, "my-service")
- # Do stuff...
-
-        except CharmError as e:
- self.unit.status = e.status
-```
-
-# Debug-mode
-
-The debug-mode allows OSM developers to easily debug OSM modules.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import DebugMode
-
-class MyCharm(CharmBase):
- _stored = StoredState()
-
- def __init__(self, _):
- # ...
- container: Container = self.unit.get_container("my-container")
- hostpaths = [
- HostPath(
- config="module-hostpath",
- container_path="/usr/lib/python3/dist-packages/module"
- ),
- ]
- vscode_workspace_path = "files/vscode-workspace.json"
- self.debug_mode = DebugMode(
- self,
- self._stored,
- container,
- hostpaths,
- vscode_workspace_path,
- )
-
- def _on_update_status(self, _):
- if self.debug_mode.started:
- return
- # ...
-
- def _get_debug_mode_information(self):
- command = self.debug_mode.command
- password = self.debug_mode.password
- return command, password
-```
-
-# More
-
-- Get pod IP with `get_pod_ip()`
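-
-Example (a minimal sketch for illustration; assumes a module-level `logger`):
-```python
-from charms.osm_libs.v0.utils import get_pod_ip
-
-class MyCharm(CharmBase):
-    def _on_update_status(self, _):
-        # Log the IP of the pod this unit is running in.
-        logger.info("Pod IP: %s", get_pod_ip())
-```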
-"""
-from dataclasses import dataclass
-import logging
-import secrets
-import socket
-from pathlib import Path
-from typing import List
-
-from lightkube import Client
-from lightkube.models.core_v1 import HostPathVolumeSource, Volume, VolumeMount
-from lightkube.resources.apps_v1 import StatefulSet
-from ops.charm import CharmBase
-from ops.framework import Object, StoredState
-from ops.model import (
- ActiveStatus,
- BlockedStatus,
- Container,
- MaintenanceStatus,
- StatusBase,
- WaitingStatus,
-)
-from ops.pebble import ServiceStatus
-
-# The unique Charmhub library identifier, never change it
-LIBID = "e915908eebee4cdd972d484728adf984"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 5
-
-logger = logging.getLogger(__name__)
-
-
-class CharmError(Exception):
- """Charm Error Exception."""
-
- def __init__(self, message: str, status_class: StatusBase = BlockedStatus) -> None:
- self.message = message
- self.status_class = status_class
- self.status = status_class(message)
-
-
-def check_container_ready(container: Container) -> None:
- """Check Pebble has started in the container.
-
- Args:
- container (Container): Container to be checked.
-
- Raises:
- CharmError: if container is not ready.
- """
- if not container.can_connect():
- raise CharmError("waiting for pebble to start", MaintenanceStatus)
-
-
-def check_service_active(container: Container, service_name: str) -> None:
- """Check if the service is running.
-
- Args:
- container (Container): Container to be checked.
- service_name (str): Name of the service to check.
-
- Raises:
- CharmError: if the service is not running.
- """
- if service_name not in container.get_plan().services:
- raise CharmError(f"{service_name} service not configured yet", WaitingStatus)
-
- if container.get_service(service_name).current != ServiceStatus.ACTIVE:
- raise CharmError(f"{service_name} service is not running")
-
-
-def get_pod_ip() -> str:
- """Get Kubernetes Pod IP.
-
- Returns:
- str: The IP of the Pod.
- """
- return socket.gethostbyname(socket.gethostname())
-
-
-_DEBUG_SCRIPT = r"""#!/bin/bash
-# Install SSH
-
-function download_code(){{
- wget https://go.microsoft.com/fwlink/?LinkID=760868 -O code.deb
-}}
-
-function setup_envs(){{
- grep "source /debug.envs" /root/.bashrc || echo "source /debug.envs" | tee -a /root/.bashrc
-}}
-function setup_ssh(){{
- apt install ssh -y
- cat /etc/ssh/sshd_config |
- grep -E '^PermitRootLogin yes$$' || (
- echo PermitRootLogin yes |
- tee -a /etc/ssh/sshd_config
- )
- service ssh stop
- sleep 3
- service ssh start
- usermod --password $(echo {} | openssl passwd -1 -stdin) root
-}}
-
-function setup_code(){{
- apt install libasound2 -y
- (dpkg -i code.deb || apt-get install -f -y || apt-get install -f -y) && echo Code installed successfully
- code --install-extension ms-python.python --user-data-dir /root
- mkdir -p /root/.vscode-server
- cp -R /root/.vscode/extensions /root/.vscode-server/extensions
-}}
-
-export DEBIAN_FRONTEND=noninteractive
-apt update && apt install wget -y
-download_code &
-setup_ssh &
-setup_envs
-wait
-setup_code &
-wait
-"""
-
-
-@dataclass
-class SubModule:
- """Represent RO Submodules."""
- sub_module_path: str
- container_path: str
-
-
-class HostPath:
- """Represents a hostpath."""
- def __init__(self, config: str, container_path: str, submodules: dict = None) -> None:
- mount_path_items = config.split("-")
- mount_path_items.reverse()
- self.mount_path = "/" + "/".join(mount_path_items)
- self.config = config
- self.sub_module_dict = {}
- if submodules:
- for submodule in submodules.keys():
- self.sub_module_dict[submodule] = SubModule(
- sub_module_path=self.mount_path + "/" + submodule + "/" + submodules[submodule].split("/")[-1],
- container_path=submodules[submodule],
- )
- else:
- self.container_path = container_path
- self.module_name = container_path.split("/")[-1]
-
-class DebugMode(Object):
- """Class to handle the debug-mode."""
-
- def __init__(
- self,
- charm: CharmBase,
- stored: StoredState,
- container: Container,
- hostpaths: List[HostPath] = [],
- vscode_workspace_path: str = "files/vscode-workspace.json",
- ) -> None:
- super().__init__(charm, "debug-mode")
-
- self.charm = charm
- self._stored = stored
- self.hostpaths = hostpaths
- self.vscode_workspace = Path(vscode_workspace_path).read_text()
- self.container = container
-
- self._stored.set_default(
- debug_mode_started=False,
- debug_mode_vscode_command=None,
- debug_mode_password=None,
- )
-
- self.framework.observe(self.charm.on.config_changed, self._on_config_changed)
- self.framework.observe(self.charm.on[container.name].pebble_ready, self._on_config_changed)
- self.framework.observe(self.charm.on.update_status, self._on_update_status)
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- if not self.charm.unit.is_leader():
- return
-
- debug_mode_enabled = self.charm.config.get("debug-mode", False)
- action = self.enable if debug_mode_enabled else self.disable
- action()
-
- def _on_update_status(self, _) -> None:
- """Handler for the update-status event."""
- if not self.charm.unit.is_leader() or not self.started:
- return
-
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- @property
- def started(self) -> bool:
- """Indicates whether the debug-mode has started or not."""
- return self._stored.debug_mode_started
-
- @property
- def command(self) -> str:
- """Command to launch vscode."""
- return self._stored.debug_mode_vscode_command
-
- @property
- def password(self) -> str:
- """SSH password."""
- return self._stored.debug_mode_password
-
- def enable(self, service_name: str = None) -> None:
- """Enable debug-mode.
-
- This function mounts hostpaths of the OSM modules (if set), and
- configures the container so it can be easily debugged. The setup
- includes the configuration of SSH, environment variables, and
- VSCode workspace and plugins.
-
- Args:
- service_name (str, optional): Pebble service name which has the desired environment
- variables. Mandatory if there is more than one Pebble service configured.
- """
- hostpaths_to_reconfigure = self._hostpaths_to_reconfigure()
- if self.started and not hostpaths_to_reconfigure:
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
- return
-
- logger.debug("enabling debug-mode")
-
-        # Mount hostpaths if set.
-        # Mounting hostpaths restarts the statefulset, so in that case we return
-        # immediately. After the restart there are no hostpaths left to reconfigure,
-        # and the next run can continue and set up the debug-mode.
- if hostpaths_to_reconfigure:
- self.charm.unit.status = MaintenanceStatus("debug-mode: configuring hostpaths")
- self._configure_hostpaths(hostpaths_to_reconfigure)
- return
-
- self.charm.unit.status = MaintenanceStatus("debug-mode: starting")
- password = secrets.token_hex(8)
- self._setup_debug_mode(
- password,
- service_name,
- mounted_hostpaths=[hp for hp in self.hostpaths if self.charm.config.get(hp.config)],
- )
-
- self._stored.debug_mode_vscode_command = self._get_vscode_command(get_pod_ip())
- self._stored.debug_mode_password = password
- self._stored.debug_mode_started = True
- logger.info("debug-mode is ready")
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- def disable(self) -> None:
- """Disable debug-mode."""
- logger.debug("disabling debug-mode")
- current_status = self.charm.unit.status
- hostpaths_unmounted = self._unmount_hostpaths()
-
- if not self._stored.debug_mode_started:
- return
- self._stored.debug_mode_started = False
- self._stored.debug_mode_vscode_command = None
- self._stored.debug_mode_password = None
-
- if not hostpaths_unmounted:
- self.charm.unit.status = current_status
- self._restart()
-
- def _hostpaths_to_reconfigure(self) -> List[HostPath]:
- hostpaths_to_reconfigure: List[HostPath] = []
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
- volumes = statefulset.spec.template.spec.volumes
-
- for hostpath in self.hostpaths:
- hostpath_is_set = True if self.charm.config.get(hostpath.config) else False
- hostpath_already_configured = next(
- (True for volume in volumes if volume.name == hostpath.config), False
- )
- if hostpath_is_set != hostpath_already_configured:
- hostpaths_to_reconfigure.append(hostpath)
-
- return hostpaths_to_reconfigure
-
- def _setup_debug_mode(
- self,
- password: str,
- service_name: str = None,
- mounted_hostpaths: List[HostPath] = [],
- ) -> None:
- services = self.container.get_plan().services
- if not service_name and len(services) != 1:
- raise Exception("Cannot start debug-mode: please set the service_name")
-
- service = None
- if not service_name:
- service_name, service = services.popitem()
- if not service:
- service = services.get(service_name)
-
- logger.debug(f"getting environment variables from service {service_name}")
- environment = service.environment
- environment_file_content = "\n".join(
- [f'export {key}="{value}"' for key, value in environment.items()]
- )
- logger.debug(f"pushing environment file to {self.container.name} container")
- self.container.push("/debug.envs", environment_file_content)
-
- # Push VSCode workspace
- logger.debug(f"pushing vscode workspace to {self.container.name} container")
- self.container.push("/debug.code-workspace", self.vscode_workspace)
-
- # Execute debugging script
- logger.debug(f"pushing debug-mode setup script to {self.container.name} container")
- self.container.push("/debug.sh", _DEBUG_SCRIPT.format(password), permissions=0o777)
- logger.debug(f"executing debug-mode setup script in {self.container.name} container")
- self.container.exec(["/debug.sh"]).wait_output()
- logger.debug(f"stopping service {service_name} in {self.container.name} container")
- self.container.stop(service_name)
-
- # Add symlinks to mounted hostpaths
- for hostpath in mounted_hostpaths:
- logger.debug(f"adding symlink for {hostpath.config}")
- if len(hostpath.sub_module_dict) > 0:
- for sub_module in hostpath.sub_module_dict.keys():
- self.container.exec(["rm", "-rf", hostpath.sub_module_dict[sub_module].container_path]).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- hostpath.sub_module_dict[sub_module].sub_module_path,
- hostpath.sub_module_dict[sub_module].container_path,
- ]
- )
-
- else:
- self.container.exec(["rm", "-rf", hostpath.container_path]).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- f"{hostpath.mount_path}/{hostpath.module_name}",
- hostpath.container_path,
- ]
- )
-
- def _configure_hostpaths(self, hostpaths: List[HostPath]):
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in hostpaths:
- if self.charm.config.get(hostpath.config):
- self._add_hostpath_to_statefulset(hostpath, statefulset)
- else:
- self._delete_hostpath_from_statefulset(hostpath, statefulset)
-
- client.replace(statefulset)
-
- def _unmount_hostpaths(self) -> bool:
- client = Client()
- hostpath_unmounted = False
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in self.hostpaths:
- if self._delete_hostpath_from_statefulset(hostpath, statefulset):
- hostpath_unmounted = True
-
- if hostpath_unmounted:
- client.replace(statefulset)
-
- return hostpath_unmounted
-
- def _add_hostpath_to_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- # Add volume
- logger.debug(f"adding volume {hostpath.config} to {self.charm.app.name} statefulset")
- volume = Volume(
- hostpath.config,
- hostPath=HostPathVolumeSource(
- path=self.charm.config[hostpath.config],
- type="Directory",
- ),
- )
- statefulset.spec.template.spec.volumes.append(volume)
-
- # Add volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
-
- logger.debug(
- f"adding volumeMount {hostpath.config} to {self.container.name} container"
- )
- statefulset_container.volumeMounts.append(
- VolumeMount(mountPath=hostpath.mount_path, name=hostpath.config)
- )
-
- def _delete_hostpath_from_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- hostpath_unmounted = False
- for volume in statefulset.spec.template.spec.volumes:
-
- if hostpath.config != volume.name:
- continue
-
- # Remove volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
- for volume_mount in statefulset_container.volumeMounts:
- if volume_mount.name != hostpath.config:
- continue
-
- logger.debug(
- f"removing volumeMount {hostpath.config} from {self.container.name} container"
- )
- statefulset_container.volumeMounts.remove(volume_mount)
-
- # Remove volume
- logger.debug(
- f"removing volume {hostpath.config} from {self.charm.app.name} statefulset"
- )
- statefulset.spec.template.spec.volumes.remove(volume)
-
- hostpath_unmounted = True
- return hostpath_unmounted
-
- def _get_vscode_command(
- self,
- pod_ip: str,
- user: str = "root",
- workspace_path: str = "/debug.code-workspace",
- ) -> str:
- return f"code --remote ssh-remote+{user}@{pod_ip} {workspace_path}"
-
- def _restart(self):
- self.container.exec(["kill", "-HUP", "1"])
diff --git a/installers/charm/osm-pol/metadata.yaml b/installers/charm/osm-pol/metadata.yaml
deleted file mode 100644
index adf189a2..00000000
--- a/installers/charm/osm-pol/metadata.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Overview on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-name: osm-pol
-
-# The following metadata are human-readable and will be published prominently on Charmhub.
-
-display-name: OSM POL
-
-summary: OSM Policy module (POL)
-
-description: |
- A Kubernetes operator that deploys the Policy module of OSM.
-
- TODO include description of the module!!!
-
- This charm doesn't make sense on its own.
- See more:
- - https://charmhub.io/osm
-
-containers:
- pol:
- resource: pol-image
-
-# This file populates the Resources tab on Charmhub.
-
-resources:
- pol-image:
- type: oci-image
- description: OCI image for pol
- upstream-source: opensourcemano/pol
-
-requires:
- kafka:
- interface: kafka
- limit: 1
- mongodb:
- interface: mongodb_client
- limit: 1
- mysql:
- interface: mysql
- limit: 1
diff --git a/installers/charm/osm-pol/pyproject.toml b/installers/charm/osm-pol/pyproject.toml
deleted file mode 100644
index 16cf0f4b..00000000
--- a/installers/charm/osm-pol/pyproject.toml
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-# Testing tools configuration
-[tool.coverage.run]
-branch = true
-
-[tool.coverage.report]
-show_missing = true
-
-[tool.pytest.ini_options]
-minversion = "6.0"
-log_cli_level = "INFO"
-
-# Formatting tools configuration
-[tool.black]
-line-length = 99
-target-version = ["py38"]
-
-[tool.isort]
-profile = "black"
-
-# Linting tools configuration
-[tool.flake8]
-max-line-length = 99
-max-doc-length = 99
-max-complexity = 10
-exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
-select = ["E", "W", "F", "C", "N", "R", "D", "H"]
-# Ignore W503, E501 because using black creates errors with this
-# Ignore D107 Missing docstring in __init__
-ignore = ["W503", "E501", "D107"]
-# D100, D101, D102, D103: Ignore missing docstrings in tests
-per-file-ignores = ["tests/*:D100,D101,D102,D103,D104"]
-docstring-convention = "google"
diff --git a/installers/charm/osm-pol/requirements.txt b/installers/charm/osm-pol/requirements.txt
deleted file mode 100644
index 398d4ad3..00000000
--- a/installers/charm/osm-pol/requirements.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-ops < 2.2
-lightkube
-lightkube-models
-# git+https://github.com/charmed-osm/config-validator/
diff --git a/installers/charm/osm-pol/src/charm.py b/installers/charm/osm-pol/src/charm.py
deleted file mode 100755
index 07bf87e1..00000000
--- a/installers/charm/osm-pol/src/charm.py
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# Learn more at: https://juju.is/docs/sdk
-
-"""OSM POL charm.
-
-See more: https://charmhub.io/osm
-"""
-
-import logging
-from typing import Any, Dict
-
-from charms.data_platform_libs.v0.data_interfaces import DatabaseRequires
-from charms.kafka_k8s.v0.kafka import KafkaEvents, KafkaRequires
-from charms.osm_libs.v0.utils import (
- CharmError,
- DebugMode,
- HostPath,
- check_container_ready,
- check_service_active,
-)
-from ops.charm import ActionEvent, CharmBase
-from ops.framework import StoredState
-from ops.main import main
-from ops.model import ActiveStatus, Container
-
-from legacy_interfaces import MysqlClient
-
-HOSTPATHS = [
- HostPath(
- config="pol-hostpath",
- container_path="/usr/lib/python3/dist-packages/osm_policy_module",
- ),
- HostPath(
- config="common-hostpath",
- container_path="/usr/lib/python3/dist-packages/osm_common",
- ),
-]
-
-logger = logging.getLogger(__name__)
-
-
-class OsmPolCharm(CharmBase):
- """OSM POL Kubernetes sidecar charm."""
-
- on = KafkaEvents()
- _stored = StoredState()
- container_name = "pol"
- service_name = "pol"
-
- def __init__(self, *args):
- super().__init__(*args)
-
- self.kafka = KafkaRequires(self)
- self.mongodb_client = DatabaseRequires(self, "mongodb", database_name="osm")
- self.mysql_client = MysqlClient(self, "mysql")
- self._observe_charm_events()
- self.container: Container = self.unit.get_container(self.container_name)
- self.debug_mode = DebugMode(self, self._stored, self.container, HOSTPATHS)
-
- # ---------------------------------------------------------------------------
- # Handlers for Charm Events
- # ---------------------------------------------------------------------------
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- try:
- self._validate_config()
- self._check_relations()
- # Check if the container is ready.
- # Eventually it will become ready after the first pebble-ready event.
- check_container_ready(self.container)
-
- if not self.debug_mode.started:
- self._configure_service(self.container)
- # Update charm status
- self._on_update_status()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _on_update_status(self, _=None) -> None:
- """Handler for the update-status event."""
- try:
- self._validate_config()
- self._check_relations()
- check_container_ready(self.container)
- if self.debug_mode.started:
- return
- check_service_active(self.container, self.service_name)
- self.unit.status = ActiveStatus()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _on_required_relation_broken(self, _) -> None:
- """Handler for the kafka-broken event."""
- # Check Pebble has started in the container
- try:
- check_container_ready(self.container)
- check_service_active(self.container, self.service_name)
- self.container.stop(self.container_name)
- except CharmError:
- pass
- self._on_update_status()
-
- def _on_get_debug_mode_information_action(self, event: ActionEvent) -> None:
- """Handler for the get-debug-mode-information action event."""
- if not self.debug_mode.started:
- event.fail("debug-mode has not started. Hint: juju config pol debug-mode=true")
- return
-
- debug_info = {"command": self.debug_mode.command, "password": self.debug_mode.password}
- event.set_results(debug_info)
-
- # ---------------------------------------------------------------------------
- # Validation and configuration and more
- # ---------------------------------------------------------------------------
-
- def _observe_charm_events(self) -> None:
- event_handler_mapping = {
- # Core lifecycle events
- self.on.pol_pebble_ready: self._on_config_changed,
- self.on.config_changed: self._on_config_changed,
- self.on.update_status: self._on_update_status,
- # Relation events
- self.on.kafka_available: self._on_config_changed,
- self.on["kafka"].relation_broken: self._on_required_relation_broken,
- self.on["mysql"].relation_changed: self._on_config_changed,
- self.on["mysql"].relation_broken: self._on_config_changed,
- self.mongodb_client.on.database_created: self._on_config_changed,
- self.on["mongodb"].relation_broken: self._on_required_relation_broken,
- # Action events
- self.on.get_debug_mode_information_action: self._on_get_debug_mode_information_action,
- }
-
- for event, handler in event_handler_mapping.items():
- self.framework.observe(event, handler)
-
- def _is_database_available(self) -> bool:
- try:
- return self.mongodb_client.is_resource_created()
- except KeyError:
- return False
-
- def _validate_config(self) -> None:
- """Validate charm configuration.
-
- Raises:
- CharmError: if charm configuration is invalid.
- """
- logger.debug("validating charm config")
-
- def _check_relations(self) -> None:
- """Validate charm relations.
-
- Raises:
- CharmError: if charm configuration is invalid.
- """
- logger.debug("check for missing relations")
- missing_relations = []
-
- if not self.kafka.host or not self.kafka.port:
- missing_relations.append("kafka")
- if not self._is_database_available():
- missing_relations.append("mongodb")
- if not self.config.get("mysql-uri") and self.mysql_client.is_missing_data_in_unit():
- missing_relations.append("mysql")
-
- if missing_relations:
- relations_str = ", ".join(missing_relations)
- one_relation_missing = len(missing_relations) == 1
- error_msg = f'need {relations_str} relation{"" if one_relation_missing else "s"}'
- logger.warning(error_msg)
- raise CharmError(error_msg)
-
- def _configure_service(self, container: Container) -> None:
- """Add Pebble layer with the pol service."""
- logger.debug(f"configuring {self.app.name} service")
- container.add_layer("pol", self._get_layer(), combine=True)
- container.replan()
-
- def _get_layer(self) -> Dict[str, Any]:
- """Get layer for Pebble."""
- return {
- "summary": "pol layer",
- "description": "pebble config layer for pol",
- "services": {
- self.service_name: {
- "override": "replace",
- "summary": "pol service",
- "command": "/bin/bash scripts/start.sh",
- "startup": "enabled",
- "user": "appuser",
- "group": "appuser",
- "environment": {
- # General configuration
- "OSMPOL_GLOBAL_LOGLEVEL": self.config["log-level"],
- # Kafka configuration
- "OSMPOL_MESSAGE_HOST": self.kafka.host,
- "OSMPOL_MESSAGE_PORT": self.kafka.port,
- "OSMPOL_MESSAGE_DRIVER": "kafka",
- # Database Mongodb configuration
- "OSMPOL_DATABASE_DRIVER": "mongo",
- "OSMPOL_DATABASE_URI": self._get_mongodb_uri(),
- # Database MySQL configuration
- "OSMPOL_SQL_DATABASE_URI": self._get_mysql_uri(),
- },
- }
- },
- }
-
- def _get_mysql_uri(self):
- return self.config.get("mysql-uri") or self.mysql_client.get_root_uri("pol")
-
- def _get_mongodb_uri(self):
- return list(self.mongodb_client.fetch_relation_data().values())[0]["uris"]
-
-
-if __name__ == "__main__": # pragma: no cover
- main(OsmPolCharm)
diff --git a/installers/charm/osm-pol/src/legacy_interfaces.py b/installers/charm/osm-pol/src/legacy_interfaces.py
deleted file mode 100644
index 443cba84..00000000
--- a/installers/charm/osm-pol/src/legacy_interfaces.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# flake8: noqa
-
-import ops
-
-
-class BaseRelationClient(ops.framework.Object):
- """Requires side of a Kafka Endpoint"""
-
- def __init__(
- self,
- charm: ops.charm.CharmBase,
- relation_name: str,
- mandatory_fields: list = [],
- ):
- super().__init__(charm, relation_name)
- self.relation_name = relation_name
- self.mandatory_fields = mandatory_fields
- self._update_relation()
-
- def get_data_from_unit(self, key: str):
- if not self.relation:
-            # Refreshing the relation should not be needed here, since the constructor
-            # runs in every hook. However, in unit tests the relation data appears empty
-            # after update_relation_data unless we refresh it, so we refresh it here as well.
- self._update_relation()
- if self.relation:
- for unit in self.relation.units:
- data = self.relation.data[unit].get(key)
- if data:
- return data
-
- def get_data_from_app(self, key: str):
- if not self.relation or self.relation.app not in self.relation.data:
-            # Refreshing the relation should not be needed here, since the constructor
-            # runs in every hook. However, in unit tests the relation data appears empty
-            # after update_relation_data unless we refresh it, so we refresh it here as well.
- self._update_relation()
- if self.relation and self.relation.app in self.relation.data:
- data = self.relation.data[self.relation.app].get(key)
- if data:
- return data
-
- def is_missing_data_in_unit(self):
- return not all([self.get_data_from_unit(field) for field in self.mandatory_fields])
-
- def is_missing_data_in_app(self):
- return not all([self.get_data_from_app(field) for field in self.mandatory_fields])
-
- def _update_relation(self):
- self.relation = self.framework.model.get_relation(self.relation_name)
-
-
-class MongoClient(BaseRelationClient):
- """Requires side of a Mongo Endpoint"""
-
- mandatory_fields_mapping = {
- "reactive": ["connection_string"],
- "ops": ["replica_set_uri", "replica_set_name"],
- }
-
- def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
- super().__init__(charm, relation_name, mandatory_fields=[])
-
- @property
- def connection_string(self):
- if self.is_opts():
- replica_set_uri = self.get_data_from_unit("replica_set_uri")
- replica_set_name = self.get_data_from_unit("replica_set_name")
- return f"{replica_set_uri}?replicaSet={replica_set_name}"
- else:
- return self.get_data_from_unit("connection_string")
-
- def is_opts(self):
- return not self.is_missing_data_in_unit_ops()
-
- def is_missing_data_in_unit(self):
- return self.is_missing_data_in_unit_ops() and self.is_missing_data_in_unit_reactive()
-
- def is_missing_data_in_unit_ops(self):
- return not all(
- [self.get_data_from_unit(field) for field in self.mandatory_fields_mapping["ops"]]
- )
-
- def is_missing_data_in_unit_reactive(self):
- return not all(
- [self.get_data_from_unit(field) for field in self.mandatory_fields_mapping["reactive"]]
- )
-
-
-class MysqlClient(BaseRelationClient):
- """Requires side of a Mysql Endpoint"""
-
- mandatory_fields = ["host", "port", "user", "password", "root_password"]
-
- def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
- super().__init__(charm, relation_name, self.mandatory_fields)
-
- @property
- def host(self):
- return self.get_data_from_unit("host")
-
- @property
- def port(self):
- return self.get_data_from_unit("port")
-
- @property
- def user(self):
- return self.get_data_from_unit("user")
-
- @property
- def password(self):
- return self.get_data_from_unit("password")
-
- @property
- def root_password(self):
- return self.get_data_from_unit("root_password")
-
- @property
- def database(self):
- return self.get_data_from_unit("database")
-
- def get_root_uri(self, database: str):
- """
- Get the URI for the mysql connection with the root user credentials
-        :param database: Database name
-        :return: A string with the following format:
-            mysql://root:<root_password>@<host>:<port>/<database>
- """
- return "mysql://root:{}@{}:{}/{}".format(
- self.root_password, self.host, self.port, database
- )
-
- def get_uri(self):
- """
- Get the URI for the mysql connection with the standard user credentials
-        :return: A string with the following format:
-            mysql://<user>:<password>@<host>:<port>/<database>
- """
- return "mysql://{}:{}@{}:{}/{}".format(
- self.user, self.password, self.host, self.port, self.database
- )
diff --git a/installers/charm/osm-pol/tests/integration/test_charm.py b/installers/charm/osm-pol/tests/integration/test_charm.py
deleted file mode 100644
index 92100006..00000000
--- a/installers/charm/osm-pol/tests/integration/test_charm.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# Learn more about testing at: https://juju.is/docs/sdk/testing
-
-import asyncio
-import logging
-from pathlib import Path
-
-import pytest
-import yaml
-from pytest_operator.plugin import OpsTest
-
-logger = logging.getLogger(__name__)
-
-METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
-POL_APP = METADATA["name"]
-KAFKA_CHARM = "kafka-k8s"
-KAFKA_APP = "kafka"
-MONGO_DB_CHARM = "mongodb-k8s"
-MONGO_DB_APP = "mongodb"
-MARIADB_CHARM = "charmed-osm-mariadb-k8s"
-MARIADB_APP = "mariadb"
-ZOOKEEPER_CHARM = "zookeeper-k8s"
-ZOOKEEPER_APP = "zookeeper"
-APPS = [KAFKA_APP, ZOOKEEPER_APP, MONGO_DB_APP, MARIADB_APP, POL_APP]
-
-
-@pytest.mark.abort_on_fail
-async def test_pol_is_deployed(ops_test: OpsTest):
- charm = await ops_test.build_charm(".")
- resources = {"pol-image": METADATA["resources"]["pol-image"]["upstream-source"]}
-
- await asyncio.gather(
- ops_test.model.deploy(
- charm, resources=resources, application_name=POL_APP, series="jammy"
- ),
- ops_test.model.deploy(KAFKA_CHARM, application_name=KAFKA_APP, channel="stable"),
- ops_test.model.deploy(MONGO_DB_CHARM, application_name=MONGO_DB_APP, channel="5/edge"),
- ops_test.model.deploy(MARIADB_CHARM, application_name=MARIADB_APP, channel="stable"),
- ops_test.model.deploy(ZOOKEEPER_CHARM, application_name=ZOOKEEPER_APP, channel="stable"),
- )
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- )
- assert ops_test.model.applications[POL_APP].status == "blocked"
- unit = ops_test.model.applications[POL_APP].units[0]
- assert unit.workload_status_message == "need kafka, mongodb, mysql relations"
-
- logger.info("Adding relations for other components")
- await ops_test.model.add_relation(KAFKA_APP, ZOOKEEPER_APP)
-
- logger.info("Adding relations for POL")
- await ops_test.model.add_relation(POL_APP, KAFKA_APP)
- await ops_test.model.add_relation(
- "{}:mongodb".format(POL_APP), "{}:database".format(MONGO_DB_APP)
- )
- await ops_test.model.add_relation(POL_APP, MARIADB_APP)
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- )
-
-
-@pytest.mark.abort_on_fail
-async def test_pol_scales_up(ops_test: OpsTest):
- logger.info("Scaling up osm-pol")
- expected_units = 3
- assert len(ops_test.model.applications[POL_APP].units) == 1
- await ops_test.model.applications[POL_APP].scale(expected_units)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=[POL_APP], status="active", wait_for_exact_units=expected_units
- )
-
-
-@pytest.mark.abort_on_fail
-@pytest.mark.parametrize("relation_to_remove", [KAFKA_APP, MONGO_DB_APP, MARIADB_APP])
-async def test_pol_blocks_without_relation(ops_test: OpsTest, relation_to_remove):
- logger.info("Removing relation: %s", relation_to_remove)
- # mongoDB relation is named "database"
- local_relation = relation_to_remove
- if relation_to_remove == MONGO_DB_APP:
- local_relation = "database"
- # mariaDB relation is named "mysql"
- if relation_to_remove == MARIADB_APP:
- local_relation = "mysql"
- await asyncio.gather(
- ops_test.model.applications[relation_to_remove].remove_relation(local_relation, POL_APP)
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[POL_APP])
- assert ops_test.model.applications[POL_APP].status == "blocked"
- for unit in ops_test.model.applications[POL_APP].units:
- assert (
- unit.workload_status_message
- == f"need {'mysql' if relation_to_remove == MARIADB_APP else relation_to_remove} relation"
- )
- await ops_test.model.add_relation(POL_APP, relation_to_remove)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- )
-
-
-@pytest.mark.abort_on_fail
-async def test_pol_action_debug_mode_disabled(ops_test: OpsTest):
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- )
- logger.info("Running action 'get-debug-mode-information'")
- action = (
- await ops_test.model.applications[POL_APP]
- .units[0]
- .run_action("get-debug-mode-information")
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[POL_APP])
- status = await ops_test.model.get_action_status(uuid_or_prefix=action.entity_id)
- assert status[action.entity_id] == "failed"
-
-
-@pytest.mark.abort_on_fail
-async def test_pol_action_debug_mode_enabled(ops_test: OpsTest):
- await ops_test.model.applications[POL_APP].set_config({"debug-mode": "true"})
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- )
- logger.info("Running action 'get-debug-mode-information'")
- # list of units is not ordered
- unit_id = list(
- filter(
- lambda x: (x.entity_id == f"{POL_APP}/0"), ops_test.model.applications[POL_APP].units
- )
- )[0]
- action = await unit_id.run_action("get-debug-mode-information")
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[POL_APP])
- status = await ops_test.model.get_action_status(uuid_or_prefix=action.entity_id)
- message = await ops_test.model.get_action_output(action_uuid=action.entity_id)
- assert status[action.entity_id] == "completed"
- assert "command" in message
- assert "password" in message
diff --git a/installers/charm/osm-pol/tests/unit/test_charm.py b/installers/charm/osm-pol/tests/unit/test_charm.py
deleted file mode 100644
index 1b5013ae..00000000
--- a/installers/charm/osm-pol/tests/unit/test_charm.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# Learn more about testing at: https://juju.is/docs/sdk/testing
-
-import pytest
-from ops.model import ActiveStatus, BlockedStatus
-from ops.testing import Harness
-from pytest_mock import MockerFixture
-
-from charm import CharmError, OsmPolCharm, check_service_active
-
-container_name = "pol"
-service_name = "pol"
-
-
-@pytest.fixture
-def harness(mocker: MockerFixture):
- harness = Harness(OsmPolCharm)
- harness.begin()
- harness.container_pebble_ready(container_name)
- yield harness
- harness.cleanup()
-
-
-def test_missing_relations(harness: Harness):
- harness.charm.on.config_changed.emit()
- assert type(harness.charm.unit.status) == BlockedStatus
- assert all(
- relation in harness.charm.unit.status.message for relation in ["mongodb", "kafka", "mysql"]
- )
-
-
-def test_ready(harness: Harness):
- _add_relations(harness)
- assert harness.charm.unit.status == ActiveStatus()
-
-
-def test_container_stops_after_relation_broken(harness: Harness):
- harness.charm.on[container_name].pebble_ready.emit(container_name)
- container = harness.charm.unit.get_container(container_name)
- relation_ids = _add_relations(harness)
- check_service_active(container, service_name)
- harness.remove_relation(relation_ids[0])
- with pytest.raises(CharmError):
- check_service_active(container, service_name)
-
-
-def _add_relations(harness: Harness):
- relation_ids = []
- # Add mongo relation
- relation_id = harness.add_relation("mongodb", "mongodb")
- harness.add_relation_unit(relation_id, "mongodb/0")
- harness.update_relation_data(
- relation_id,
- "mongodb",
- {"uris": "mongodb://:1234", "username": "user", "password": "password"},
- )
- relation_ids.append(relation_id)
- # Add kafka relation
- relation_id = harness.add_relation("kafka", "kafka")
- harness.add_relation_unit(relation_id, "kafka/0")
- harness.update_relation_data(relation_id, "kafka", {"host": "kafka", "port": "9092"})
- relation_ids.append(relation_id)
- # Add mysql relation
- relation_id = harness.add_relation("mysql", "mysql")
- harness.add_relation_unit(relation_id, "mysql/0")
- harness.update_relation_data(
- relation_id,
- "mysql/0",
- {
- "host": "mysql",
- "port": "3306",
- "user": "mano",
- "password": "manopw",
- "root_password": "rootmanopw",
- },
- )
- relation_ids.append(relation_id)
- return relation_ids
diff --git a/installers/charm/osm-pol/tox.ini b/installers/charm/osm-pol/tox.ini
deleted file mode 100644
index 2d95eca6..00000000
--- a/installers/charm/osm-pol/tox.ini
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-[tox]
-skipsdist=True
-skip_missing_interpreters = True
-envlist = lint, unit, integration
-
-[vars]
-src_path = {toxinidir}/src/
-tst_path = {toxinidir}/tests/
-all_path = {[vars]src_path} {[vars]tst_path}
-
-[testenv]
-basepython = python3.8
-setenv =
- PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path}
- PYTHONBREAKPOINT=ipdb.set_trace
- PY_COLORS=1
-passenv =
- PYTHONPATH
- CHARM_BUILD_DIR
- MODEL_SETTINGS
-
-[testenv:fmt]
-description = Apply coding style standards to code
-deps =
- black
- isort
-commands =
- isort {[vars]all_path}
- black {[vars]all_path}
-
-[testenv:lint]
-description = Check code against coding style standards
-deps =
- black
- flake8
- flake8-docstrings
- flake8-builtins
- pyproject-flake8
- pep8-naming
- isort
- codespell
-commands =
- codespell {toxinidir} --skip {toxinidir}/.git --skip {toxinidir}/.tox \
- --skip {toxinidir}/build --skip {toxinidir}/lib --skip {toxinidir}/venv \
- --skip {toxinidir}/.mypy_cache --skip {toxinidir}/icon.svg
- # pflake8 wrapper supports config from pyproject.toml
- pflake8 {[vars]all_path}
- isort --check-only --diff {[vars]all_path}
- black --check --diff {[vars]all_path}
-
-[testenv:unit]
-description = Run unit tests
-deps =
- pytest
- pytest-mock
- coverage[toml]
- -r{toxinidir}/requirements.txt
-commands =
- coverage run --source={[vars]src_path} \
- -m pytest --ignore={[vars]tst_path}integration -v --tb native -s {posargs}
- coverage report
- coverage xml
-
-[testenv:integration]
-description = Run integration tests
-deps =
- pytest
- juju<3
- pytest-operator
- -r{toxinidir}/requirements.txt
-commands =
- pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} --cloud microk8s
diff --git a/installers/charm/osm-ro/.gitignore b/installers/charm/osm-ro/.gitignore
deleted file mode 100644
index 87d0a587..00000000
--- a/installers/charm/osm-ro/.gitignore
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-venv/
-build/
-*.charm
-.tox/
-.coverage
-coverage.xml
-__pycache__/
-*.py[cod]
-.vscode
\ No newline at end of file
diff --git a/installers/charm/osm-ro/.jujuignore b/installers/charm/osm-ro/.jujuignore
deleted file mode 100644
index 17c7a8bb..00000000
--- a/installers/charm/osm-ro/.jujuignore
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-/venv
-*.py[cod]
-*.charm
diff --git a/installers/charm/osm-ro/CONTRIBUTING.md b/installers/charm/osm-ro/CONTRIBUTING.md
deleted file mode 100644
index 61f2a0a5..00000000
--- a/installers/charm/osm-ro/CONTRIBUTING.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
-# Contributing
-
-## Overview
-
-This document explains the processes and practices recommended for contributing enhancements to
-this operator.
-
-- Generally, before developing enhancements to this charm, you should consider [opening an issue
- ](https://osm.etsi.org/bugzilla/enter_bug.cgi?product=OSM) explaining your use case. (Component=devops, version=master)
-- If you would like to chat with us about your use-cases or proposed implementation, you can reach
- us at [OSM Juju public channel](https://opensourcemano.slack.com/archives/C027KJGPECA).
-- Familiarising yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) library
- will help you a lot when working on new features or bug fixes.
-- All enhancements require review before being merged. Code review typically examines
- - code quality
- - test coverage
-  - user experience for Juju administrators of this charm.
-- Please help us keep branches easy to review by rebasing your Gerrit patch onto
-  the `master` branch.
-
-## Developing
-
-You can use the environments created by `tox` for development:
-
-```shell
-tox --notest -e unit
-source .tox/unit/bin/activate
-```
-
-### Testing
-
-```shell
-tox -e fmt # update your code according to linting rules
-tox -e lint # code style
-tox -e unit # unit tests
-tox -e integration # integration tests
-tox # runs 'lint' and 'unit' environments
-```
-
-## Build charm
-
-Build the charm in this git repository using:
-
-```shell
-charmcraft pack
-```
-
-### Deploy
-
-```bash
-# Create a model
-juju add-model dev
-# Enable DEBUG logging
-juju model-config logging-config="=INFO;unit=DEBUG"
-# Deploy the charm
-juju deploy ./osm-ro_ubuntu-22.04-amd64.charm \
- --resource ro-image=opensourcemano/ro:testing-daily --series jammy
-```
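-
-Once the charm is deployed, you can follow the unit logs (at the DEBUG level configured
-above) with, for example:
-
-```bash
-juju debug-log
-```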
diff --git a/installers/charm/osm-ro/LICENSE b/installers/charm/osm-ro/LICENSE
deleted file mode 100644
index 7e9d5046..00000000
--- a/installers/charm/osm-ro/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2022 Canonical Ltd.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/installers/charm/osm-ro/README.md b/installers/charm/osm-ro/README.md
deleted file mode 100644
index 44250f9a..00000000
--- a/installers/charm/osm-ro/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-
-
-# OSM RO
-
-Charmhub package name: osm-ro
-More information: https://charmhub.io/osm-ro
-
-## Other resources
-
-* [Read more](https://osm.etsi.org/docs/user-guide/latest/)
-
-* [Contributing](https://osm.etsi.org/gitweb/?p=osm/devops.git;a=blob;f=installers/charm/osm-ro/CONTRIBUTING.md)
-
-* See the [Juju SDK documentation](https://juju.is/docs/sdk) for more information about developing and improving charms.
diff --git a/installers/charm/osm-ro/actions.yaml b/installers/charm/osm-ro/actions.yaml
deleted file mode 100644
index 0d73468f..00000000
--- a/installers/charm/osm-ro/actions.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Actions tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-get-debug-mode-information:
- description: Get information to debug the container
diff --git a/installers/charm/osm-ro/charmcraft.yaml b/installers/charm/osm-ro/charmcraft.yaml
deleted file mode 100644
index f5e3ff37..00000000
--- a/installers/charm/osm-ro/charmcraft.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-
-type: charm
-bases:
- - build-on:
- - name: "ubuntu"
- channel: "22.04"
- run-on:
- - name: "ubuntu"
- channel: "22.04"
-
-parts:
- charm:
- # build-packages:
- # - git
- prime:
- - files/*
diff --git a/installers/charm/osm-ro/config.yaml b/installers/charm/osm-ro/config.yaml
deleted file mode 100644
index 036eecd4..00000000
--- a/installers/charm/osm-ro/config.yaml
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Configure tab on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-options:
- log-level:
- default: "INFO"
- description: |
- Set the Logging Level.
-
- Options:
- - TRACE
- - DEBUG
- - INFO
- - WARN
- - ERROR
- - FATAL
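-
-      Example (illustrative):
-        $ juju config ro log-level=DEBUG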
- type: string
- database-commonkey:
- description: Database COMMON KEY
- type: string
- default: osm
- certificates:
- type: string
- description: |
-      comma-separated list of <name>:<content> certificates.
- Where:
- name: name of the file for the certificate
- content: base64 content of the certificate
- The path for the files is /certs.
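-
-      Example (illustrative only; the certificate name and content below are placeholders):
-        $ juju config ro certificates="ca.crt:$(base64 -w0 ca.crt)"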
-
- # Debug-mode options
- debug-mode:
- type: boolean
- description: |
- Great for OSM Developers! (Not recommended for production deployments)
-
-      This option activates the Debug Mode, which sets up the container to be ready for debugging.
- As part of the setup, SSH is enabled and a VSCode workspace file is automatically populated.
-
- After enabling the debug-mode, execute the following command to get the information you need
- to start debugging:
- `juju run-action get-debug-mode-information --wait`
-
- The previous command returns the command you need to execute, and the SSH password that was set.
-
- See also:
- - https://charmhub.io/osm-ro/configure#ro-hostpath
- - https://charmhub.io/osm-ro/configure#common-hostpath
- default: false
- ro-hostpath:
- type: string
- description: |
- Set this config to the local path of the ro module to persist the changes done during the
- debug-mode session.
-
- Example:
- $ git clone "https://osm.etsi.org/gerrit/osm/RO" /home/ubuntu/ro
- $ juju config ro ro-hostpath=/home/ubuntu/ro
-
- This configuration only applies if option `debug-mode` is set to true.
-
- common-hostpath:
- type: string
- description: |
- Set this config to the local path of the common module to persist the changes done during the
- debug-mode session.
-
- Example:
- $ git clone "https://osm.etsi.org/gerrit/osm/common" /home/ubuntu/common
- $ juju config ro common-hostpath=/home/ubuntu/common
-
- This configuration only applies if option `debug-mode` is set to true.
-
- period_refresh_active:
- type: int
- description: |
-      Updates the VNF status from the VIM every given period of time, in seconds.
-      Values equal to or greater than 60 are allowed.
-      Disable the updates from the VIM by setting -1.
- Example:
- $ juju config ro period_refresh_active=-1
- $ juju config ro period_refresh_active=100
diff --git a/installers/charm/osm-ro/files/vscode-workspace.json b/installers/charm/osm-ro/files/vscode-workspace.json
deleted file mode 100644
index 5ab09130..00000000
--- a/installers/charm/osm-ro/files/vscode-workspace.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{
- "folders": [
- {"path": "/usr/lib/python3/dist-packages/osm_ng_ro"},
- {"path": "/usr/lib/python3/dist-packages/osm_common"},
- {"path": "/usr/lib/python3/dist-packages/osm_ro_plugin"},
- {"path": "/usr/lib/python3/dist-packages/osm_rosdn_arista_cloudvision"},
- {"path": "/usr/lib/python3/dist-packages/osm_rosdn_dpb"},
- {"path": "/usr/lib/python3/dist-packages/osm_rosdn_dynpac"},
- {"path": "/usr/lib/python3/dist-packages/osm_rosdn_floodlightof"},
- {"path": "/usr/lib/python3/dist-packages/osm_rosdn_ietfl2vpn"},
- {"path": "/usr/lib/python3/dist-packages/osm_rosdn_juniper_contrail"},
- {"path": "/usr/lib/python3/dist-packages/osm_rosdn_odlof"},
- {"path": "/usr/lib/python3/dist-packages/osm_rosdn_onos_vpls"},
- {"path": "/usr/lib/python3/dist-packages/osm_rosdn_onosof"},
- {"path": "/usr/lib/python3/dist-packages/osm_rovim_aws"},
- {"path": "/usr/lib/python3/dist-packages/osm_rovim_azure"},
- {"path": "/usr/lib/python3/dist-packages/osm_rovim_gcp"},
- {"path": "/usr/lib/python3/dist-packages/osm_rovim_openstack"},
- {"path": "/usr/lib/python3/dist-packages/osm_rovim_vmware"},
- ],
- "launch": {
- "configurations": [
- {
- "module": "osm_ng_ro.ro_main",
- "name": "NG RO",
- "request": "launch",
- "type": "python",
- "justMyCode": false,
- }
- ],
- "version": "0.2.0",
- },
- "settings": {},
-}
\ No newline at end of file
diff --git a/installers/charm/osm-ro/lib/charms/data_platform_libs/v0/data_interfaces.py b/installers/charm/osm-ro/lib/charms/data_platform_libs/v0/data_interfaces.py
deleted file mode 100644
index b3da5aa4..00000000
--- a/installers/charm/osm-ro/lib/charms/data_platform_libs/v0/data_interfaces.py
+++ /dev/null
@@ -1,1130 +0,0 @@
-# Copyright 2023 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Library to manage the relation for the data-platform products.
-
-This library contains the Requires and Provides classes for handling the relation
-between an application and multiple managed applications supported by the data team:
-MySQL, PostgreSQL, MongoDB, Redis, and Kafka.
-
-### Database (MySQL, PostgreSQL, MongoDB, and Redis)
-
-#### Requires Charm
-This library is a uniform interface to a selection of common database
-metadata, with added custom events that add convenience to database management,
-and methods to consume the application related data.
-
-
-The following is an example of using the DatabaseCreatedEvent, in the context of the
-application charm code:
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- DatabaseCreatedEvent,
- DatabaseRequires,
-)
-
-class ApplicationCharm(CharmBase):
- # Application charm that connects to database charms.
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Charm events defined in the database requires charm library.
- self.database = DatabaseRequires(self, relation_name="database", database_name="database")
- self.framework.observe(self.database.on.database_created, self._on_database_created)
-
- def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
-
- # Start application with rendered configuration
- self._start_application(config_file)
-
- # Set active status
- self.unit.status = ActiveStatus("received database credentials")
-```
-
-As shown above, the library provides some custom events to handle specific situations,
-which are listed below:
-
-- database_created: event emitted when the requested database is created.
-- endpoints_changed: event emitted when the read/write endpoints of the database have changed.
-- read_only_endpoints_changed: event emitted when the read-only endpoints of the database
- have changed. Event is not triggered if read/write endpoints changed too.
-
-If you need to connect multiple database clusters to the same relation endpoint,
-the application charm can implement the same code as if it were connecting to a
-single database cluster (as in the code example above).
-
-To differentiate multiple clusters connected to the same relation endpoint,
-the application charm can use the name of the remote application:
-
-```python
-
-def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Get the remote app name of the cluster that triggered this event
- cluster = event.relation.app.name
-```
-
-It is also possible to provide an alias for each different database cluster/relation.
-
-So, it is possible to differentiate the clusters in two ways.
-The first is to use the remote application name, i.e., `event.relation.app.name`, as above.
-
-The second way is to use a different event handler for each cluster's events.
-The implementation would be something like the following code:
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- DatabaseCreatedEvent,
- DatabaseRequires,
-)
-
-class ApplicationCharm(CharmBase):
- # Application charm that connects to database charms.
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Define the cluster aliases and one handler for each cluster database created event.
- self.database = DatabaseRequires(
- self,
- relation_name="database",
- database_name="database",
- relations_aliases = ["cluster1", "cluster2"],
- )
- self.framework.observe(
- self.database.on.cluster1_database_created, self._on_cluster1_database_created
- )
- self.framework.observe(
- self.database.on.cluster2_database_created, self._on_cluster2_database_created
- )
-
- def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database on the cluster named cluster1
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
- ...
-
- def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None:
- # Handle the created database on the cluster named cluster2
-
- # Create configuration file for app
- config_file = self._render_app_config_file(
- event.username,
- event.password,
- event.endpoints,
- )
- ...
-
-```
-
-#### Provider Charm
-
-The following is an example of using the DatabaseRequestedEvent, in the context of the
-database charm code:
-
-```python
-from charms.data_platform_libs.v0.data_interfaces import DatabaseProvides
-
-class SampleCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- # Charm events defined in the database provides charm library.
- self.provided_database = DatabaseProvides(self, relation_name="database")
- self.framework.observe(self.provided_database.on.database_requested,
- self._on_database_requested)
- # Database generic helper
- self.database = DatabaseHelper()
-
- def _on_database_requested(self, event: DatabaseRequestedEvent) -> None:
- # Handle the event triggered by a new database requested in the relation
- # Retrieve the database name using the charm library.
- db_name = event.database
- # generate a new user credential
- username = self.database.generate_user()
- password = self.database.generate_password()
- # set the credentials for the relation
- self.provided_database.set_credentials(event.relation.id, username, password)
-        # set other variables for the relation, e.g.:
-        # self.provided_database.set_tls(event.relation.id, "False")
-```
-As shown above, the library provides a custom event (database_requested) to handle
-the situation when an application charm requests a new database to be created.
-It is preferred to subscribe to this event instead of the relation-changed event,
-to avoid creating a new database when information other than the database name is
-exchanged in the relation databag.
-
-### Kafka
-
-This library is the interface to use and interact with the Kafka charm. This library contains
-custom events that make managing Kafka more convenient, and provides methods to consume the
-application-related data.
-
-#### Requirer Charm
-
-```python
-
-from charms.data_platform_libs.v0.data_interfaces import (
- BootstrapServerChangedEvent,
- KafkaRequires,
- TopicCreatedEvent,
-)
-
-class ApplicationCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- self.kafka = KafkaRequires(self, "kafka_client", "test-topic")
- self.framework.observe(
- self.kafka.on.bootstrap_server_changed, self._on_kafka_bootstrap_server_changed
- )
- self.framework.observe(
- self.kafka.on.topic_created, self._on_kafka_topic_created
- )
-
- def _on_kafka_bootstrap_server_changed(self, event: BootstrapServerChangedEvent):
- # Event triggered when a bootstrap server was changed for this application
-
- new_bootstrap_server = event.bootstrap_server
- ...
-
- def _on_kafka_topic_created(self, event: TopicCreatedEvent):
- # Event triggered when a topic was created for this application
- username = event.username
- password = event.password
- tls = event.tls
-        tls_ca = event.tls_ca
-        bootstrap_server = event.bootstrap_server
-        consumer_group_prefix = event.consumer_group_prefix
- zookeeper_uris = event.zookeeper_uris
- ...
-
-```
-
-As shown above, the library provides some custom events to handle specific situations,
-which are listed below:
-
-- topic_created: event emitted when the requested topic is created.
-- bootstrap_server_changed: event emitted when the bootstrap server has changed.
-- credential_changed: event emitted when the Kafka credentials have changed.
-
-#### Provider Charm
-
-Following the previous example, this is an example of the provider charm.
-
-```python
-from charms.data_platform_libs.v0.data_interfaces import (
-    KafkaProvides,
-    TopicRequestedEvent,
-)
-
-
-class SampleCharm(CharmBase):
-
-    def __init__(self, *args):
- super().__init__(*args)
-
- # Default charm events.
- self.framework.observe(self.on.start, self._on_start)
-
- # Charm events defined in the Kafka Provides charm library.
- self.kafka_provider = KafkaProvides(self, relation_name="kafka_client")
- self.framework.observe(self.kafka_provider.on.topic_requested, self._on_topic_requested)
- # Kafka generic helper
- self.kafka = KafkaHelper()
-
- def _on_topic_requested(self, event: TopicRequestedEvent):
- # Handle the on_topic_requested event.
-
- topic = event.topic
- relation_id = event.relation.id
- # set connection info in the databag relation
- self.kafka_provider.set_bootstrap_server(relation_id, self.kafka.get_bootstrap_server())
- self.kafka_provider.set_credentials(relation_id, username=username, password=password)
- self.kafka_provider.set_consumer_group_prefix(relation_id, ...)
- self.kafka_provider.set_tls(relation_id, "False")
- self.kafka_provider.set_zookeeper_uris(relation_id, ...)
-
-```
-As shown above, the library provides a custom event (topic_requested) to handle
-the situation when an application charm requests a new topic to be created.
-It is preferred to subscribe to this event instead of the relation-changed event, to avoid
-creating a new topic when information other than the topic name is
-exchanged in the relation databag.
-"""
-
-import json
-import logging
-from abc import ABC, abstractmethod
-from collections import namedtuple
-from datetime import datetime
-from typing import List, Optional
-
-from ops.charm import (
- CharmBase,
- CharmEvents,
- RelationChangedEvent,
- RelationEvent,
- RelationJoinedEvent,
-)
-from ops.framework import EventSource, Object
-from ops.model import Relation
-
-# The unique Charmhub library identifier, never change it
-LIBID = "6c3e6b6680d64e9c89e611d1a15f65be"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 7
-
-PYDEPS = ["ops>=2.0.0"]
-
-logger = logging.getLogger(__name__)
-
-Diff = namedtuple("Diff", "added changed deleted")
-Diff.__doc__ = """
-A tuple for storing the diff between two data mappings.
-
-added - keys that were added
-changed - keys that still exist but have new values
-deleted - keys that were deleted"""
-
-
-def diff(event: RelationChangedEvent, bucket: str) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
- bucket: bucket of the databag (app or unit)
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- # Retrieve the old data from the data key in the application relation databag.
- old_data = json.loads(event.relation.data[bucket].get("data", "{}"))
- # Retrieve the new data from the event relation databag.
- new_data = {
- key: value for key, value in event.relation.data[event.app].items() if key != "data"
- }
-
- # These are the keys that were added to the databag and triggered this event.
- added = new_data.keys() - old_data.keys()
- # These are the keys that were removed from the databag and triggered this event.
- deleted = old_data.keys() - new_data.keys()
- # These are the keys that already existed in the databag,
- # but had their values changed.
- changed = {key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key]}
- # Convert the new_data to a serializable format and save it for a next diff check.
- event.relation.data[bucket].update({"data": json.dumps(new_data)})
-
- # Return the diff with all possible changes.
- return Diff(added, changed, deleted)
-
-
-# Base DataProvides and DataRequires
-
-
-class DataProvides(Object, ABC):
- """Base provides-side of the data products relation."""
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
- self.charm = charm
- self.local_app = self.charm.model.app
- self.local_unit = self.charm.unit
- self.relation_name = relation_name
- self.framework.observe(
- charm.on[relation_name].relation_changed,
- self._on_relation_changed,
- )
-
- def _diff(self, event: RelationChangedEvent) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- return diff(event, self.local_app)
-
- @abstractmethod
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation data has changed."""
- raise NotImplementedError
-
- def fetch_relation_data(self) -> dict:
- """Retrieves data from relation.
-
- This function can be used to retrieve data from a relation
- in the charm code when outside an event callback.
-
- Returns:
- a dict of the values stored in the relation data bag
- for all relation instances (indexed by the relation id).
- """
- data = {}
- for relation in self.relations:
- data[relation.id] = {
- key: value for key, value in relation.data[relation.app].items() if key != "data"
- }
- return data
-
- def _update_relation_data(self, relation_id: int, data: dict) -> None:
- """Updates a set of key-value pairs in the relation.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- data: dict containing the key-value pairs
- that should be updated in the relation.
- """
- if self.local_unit.is_leader():
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_app].update(data)
-
- @property
- def relations(self) -> List[Relation]:
- """The list of Relation instances associated with this relation_name."""
- return list(self.charm.model.relations[self.relation_name])
-
- def set_credentials(self, relation_id: int, username: str, password: str) -> None:
- """Set credentials.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- username: user that was created.
- password: password of the created user.
- """
- self._update_relation_data(
- relation_id,
- {
- "username": username,
- "password": password,
- },
- )
-
- def set_tls(self, relation_id: int, tls: str) -> None:
- """Set whether TLS is enabled.
-
- Args:
- relation_id: the identifier for a particular relation.
- tls: whether tls is enabled (True or False).
- """
- self._update_relation_data(relation_id, {"tls": tls})
-
- def set_tls_ca(self, relation_id: int, tls_ca: str) -> None:
- """Set the TLS CA in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- tls_ca: TLS certification authority.
- """
- self._update_relation_data(relation_id, {"tls_ca": tls_ca})
-
-
-class DataRequires(Object, ABC):
- """Requires-side of the relation."""
-
- def __init__(
- self,
- charm,
- relation_name: str,
- extra_user_roles: str = None,
- ):
- """Manager of base client relations."""
- super().__init__(charm, relation_name)
- self.charm = charm
- self.extra_user_roles = extra_user_roles
- self.local_app = self.charm.model.app
- self.local_unit = self.charm.unit
- self.relation_name = relation_name
- self.framework.observe(
- self.charm.on[relation_name].relation_joined, self._on_relation_joined_event
- )
- self.framework.observe(
- self.charm.on[relation_name].relation_changed, self._on_relation_changed_event
- )
-
- @abstractmethod
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the relation."""
- raise NotImplementedError
-
- @abstractmethod
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- raise NotImplementedError
-
- def fetch_relation_data(self) -> dict:
- """Retrieves data from relation.
-
- This function can be used to retrieve data from a relation
- in the charm code when outside an event callback.
- Function cannot be used in `*-relation-broken` events and will raise an exception.
-
- Returns:
- a dict of the values stored in the relation data bag
- for all relation instances (indexed by the relation ID).
- """
- data = {}
- for relation in self.relations:
- data[relation.id] = {
- key: value for key, value in relation.data[relation.app].items() if key != "data"
- }
- return data
-
- def _update_relation_data(self, relation_id: int, data: dict) -> None:
- """Updates a set of key-value pairs in the relation.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- data: dict containing the key-value pairs
- that should be updated in the relation.
- """
- if self.local_unit.is_leader():
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_app].update(data)
-
- def _diff(self, event: RelationChangedEvent) -> Diff:
- """Retrieves the diff of the data in the relation changed databag.
-
- Args:
- event: relation changed event.
-
- Returns:
- a Diff instance containing the added, deleted and changed
- keys from the event relation databag.
- """
- return diff(event, self.local_unit)
-
- @property
- def relations(self) -> List[Relation]:
- """The list of Relation instances associated with this relation_name."""
- return [
- relation
- for relation in self.charm.model.relations[self.relation_name]
- if self._is_relation_active(relation)
- ]
-
- @staticmethod
- def _is_relation_active(relation: Relation):
- try:
- _ = repr(relation.data)
- return True
- except RuntimeError:
- return False
-
- @staticmethod
- def _is_resource_created_for_relation(relation: Relation):
- return (
- "username" in relation.data[relation.app] and "password" in relation.data[relation.app]
- )
-
- def is_resource_created(self, relation_id: Optional[int] = None) -> bool:
- """Check if the resource has been created.
-
- This function can be used to check if the Provider answered with data in the charm code
- when outside an event callback.
-
- Args:
-            relation_id (int, optional): When provided, the check is done only for the given
-                relation id; otherwise the check is done for all relations.
-
- Returns:
- True or False
-
- Raises:
- IndexError: If relation_id is provided but that relation does not exist
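-
-        Example (illustrative sketch; `self.database` is assumed to be a
-        DatabaseRequires instance and `config_file` a previously rendered config):
-            if self.database.is_resource_created():
-                self._start_application(config_file)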
- """
- if relation_id is not None:
- try:
- relation = [relation for relation in self.relations if relation.id == relation_id][
- 0
- ]
- return self._is_resource_created_for_relation(relation)
- except IndexError:
- raise IndexError(f"relation id {relation_id} cannot be accessed")
- else:
- return (
- all(
- [
- self._is_resource_created_for_relation(relation)
- for relation in self.relations
- ]
- )
- if self.relations
- else False
- )
-
-
-# General events
-
-
-class ExtraRoleEvent(RelationEvent):
- """Base class for data events."""
-
- @property
- def extra_user_roles(self) -> Optional[str]:
- """Returns the extra user roles that were requested."""
- return self.relation.data[self.relation.app].get("extra-user-roles")
-
-
-class AuthenticationEvent(RelationEvent):
- """Base class for authentication fields for events."""
-
- @property
- def username(self) -> Optional[str]:
- """Returns the created username."""
- return self.relation.data[self.relation.app].get("username")
-
- @property
- def password(self) -> Optional[str]:
- """Returns the password for the created user."""
- return self.relation.data[self.relation.app].get("password")
-
- @property
- def tls(self) -> Optional[str]:
- """Returns whether TLS is configured."""
- return self.relation.data[self.relation.app].get("tls")
-
- @property
- def tls_ca(self) -> Optional[str]:
- """Returns TLS CA."""
- return self.relation.data[self.relation.app].get("tls-ca")
-
-
-# Database related events and fields
-
-
-class DatabaseProvidesEvent(RelationEvent):
- """Base class for database events."""
-
- @property
- def database(self) -> Optional[str]:
- """Returns the database that was requested."""
- return self.relation.data[self.relation.app].get("database")
-
-
-class DatabaseRequestedEvent(DatabaseProvidesEvent, ExtraRoleEvent):
- """Event emitted when a new database is requested for use on this relation."""
-
-
-class DatabaseProvidesEvents(CharmEvents):
- """Database events.
-
- This class defines the events that the database can emit.
- """
-
- database_requested = EventSource(DatabaseRequestedEvent)
-
-
-class DatabaseRequiresEvent(RelationEvent):
- """Base class for database events."""
-
- @property
- def endpoints(self) -> Optional[str]:
- """Returns a comma separated list of read/write endpoints."""
- return self.relation.data[self.relation.app].get("endpoints")
-
- @property
- def read_only_endpoints(self) -> Optional[str]:
- """Returns a comma separated list of read only endpoints."""
- return self.relation.data[self.relation.app].get("read-only-endpoints")
-
- @property
- def replset(self) -> Optional[str]:
- """Returns the replicaset name.
-
- MongoDB only.
- """
- return self.relation.data[self.relation.app].get("replset")
-
- @property
- def uris(self) -> Optional[str]:
- """Returns the connection URIs.
-
- MongoDB, Redis, OpenSearch.
- """
- return self.relation.data[self.relation.app].get("uris")
-
- @property
- def version(self) -> Optional[str]:
- """Returns the version of the database.
-
- Version as informed by the database daemon.
- """
- return self.relation.data[self.relation.app].get("version")
-
-
-class DatabaseCreatedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when a new database is created for use on this relation."""
-
-
-class DatabaseEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when the read/write endpoints are changed."""
-
-
-class DatabaseReadOnlyEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent):
- """Event emitted when the read only endpoints are changed."""
-
-
-class DatabaseRequiresEvents(CharmEvents):
- """Database events.
-
- This class defines the events that the database can emit.
- """
-
- database_created = EventSource(DatabaseCreatedEvent)
- endpoints_changed = EventSource(DatabaseEndpointsChangedEvent)
- read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent)
-
-
-# Database Provider and Requires
-
-
-class DatabaseProvides(DataProvides):
- """Provider-side of the database relations."""
-
- on = DatabaseProvidesEvents()
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
-
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation has changed."""
- # Only the leader should handle this event.
- if not self.local_unit.is_leader():
- return
-
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Emit a database requested event if the setup key (database name and optional
- # extra user roles) was added to the relation databag by the application.
- if "database" in diff.added:
- self.on.database_requested.emit(event.relation, app=event.app, unit=event.unit)
-
- def set_endpoints(self, relation_id: int, connection_strings: str) -> None:
- """Set database primary connections.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- connection_strings: database hosts and ports comma separated list.
- """
- self._update_relation_data(relation_id, {"endpoints": connection_strings})
-
- def set_read_only_endpoints(self, relation_id: int, connection_strings: str) -> None:
- """Set database replicas connection strings.
-
- This function writes in the application data bag, therefore,
- only the leader unit can call it.
-
- Args:
- relation_id: the identifier for a particular relation.
- connection_strings: database hosts and ports comma separated list.
- """
- self._update_relation_data(relation_id, {"read-only-endpoints": connection_strings})
-
- def set_replset(self, relation_id: int, replset: str) -> None:
- """Set replica set name in the application relation databag.
-
- MongoDB only.
-
- Args:
- relation_id: the identifier for a particular relation.
- replset: replica set name.
- """
- self._update_relation_data(relation_id, {"replset": replset})
-
- def set_uris(self, relation_id: int, uris: str) -> None:
- """Set the database connection URIs in the application relation databag.
-
- MongoDB, Redis, and OpenSearch only.
-
- Args:
- relation_id: the identifier for a particular relation.
- uris: connection URIs.
- """
- self._update_relation_data(relation_id, {"uris": uris})
-
- def set_version(self, relation_id: int, version: str) -> None:
- """Set the database version in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- version: database version.
- """
- self._update_relation_data(relation_id, {"version": version})
-
-
-class DatabaseRequires(DataRequires):
- """Requires-side of the database relation."""
-
- on = DatabaseRequiresEvents()
-
- def __init__(
- self,
- charm,
- relation_name: str,
- database_name: str,
- extra_user_roles: str = None,
- relations_aliases: List[str] = None,
- ):
- """Manager of database client relations."""
- super().__init__(charm, relation_name, extra_user_roles)
- self.database = database_name
- self.relations_aliases = relations_aliases
-
- # Define custom event names for each alias.
- if relations_aliases:
- # Ensure the number of aliases does not exceed the maximum
- # of connections allowed in the specific relation.
- relation_connection_limit = self.charm.meta.requires[relation_name].limit
- if len(relations_aliases) != relation_connection_limit:
- raise ValueError(
- f"The number of aliases must match the maximum number of connections allowed in the relation. "
- f"Expected {relation_connection_limit}, got {len(relations_aliases)}"
- )
-
- for relation_alias in relations_aliases:
- self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent)
- self.on.define_event(
- f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent
- )
- self.on.define_event(
- f"{relation_alias}_read_only_endpoints_changed",
- DatabaseReadOnlyEndpointsChangedEvent,
- )
-
- def _assign_relation_alias(self, relation_id: int) -> None:
- """Assigns an alias to a relation.
-
- This function writes in the unit data bag.
-
- Args:
- relation_id: the identifier for a particular relation.
- """
- # If no aliases were provided, return immediately.
- if not self.relations_aliases:
- return
-
- # Return if an alias was already assigned to this relation
- # (like when there are more than one unit joining the relation).
- if (
- self.charm.model.get_relation(self.relation_name, relation_id)
- .data[self.local_unit]
- .get("alias")
- ):
- return
-
- # Retrieve the available aliases (the ones that weren't assigned to any relation).
- available_aliases = self.relations_aliases[:]
- for relation in self.charm.model.relations[self.relation_name]:
- alias = relation.data[self.local_unit].get("alias")
- if alias:
- logger.debug("Alias %s was already assigned to relation %d", alias, relation.id)
- available_aliases.remove(alias)
-
- # Set the alias in the unit relation databag of the specific relation.
- relation = self.charm.model.get_relation(self.relation_name, relation_id)
- relation.data[self.local_unit].update({"alias": available_aliases[0]})
-
- def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None:
- """Emit an aliased event to a particular relation if it has an alias.
-
- Args:
- event: the relation changed event that was received.
- event_name: the name of the event to emit.
- """
- alias = self._get_relation_alias(event.relation.id)
- if alias:
- getattr(self.on, f"{alias}_{event_name}").emit(
- event.relation, app=event.app, unit=event.unit
- )
-
- def _get_relation_alias(self, relation_id: int) -> Optional[str]:
- """Returns the relation alias.
-
- Args:
- relation_id: the identifier for a particular relation.
-
- Returns:
- the relation alias or None if the relation was not found.
- """
- for relation in self.charm.model.relations[self.relation_name]:
- if relation.id == relation_id:
- return relation.data[self.local_unit].get("alias")
- return None
-
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the database relation."""
- # If relations aliases were provided, assign one to the relation.
- self._assign_relation_alias(event.relation.id)
-
- # Sets both database and extra user roles in the relation
- # if the roles are provided. Otherwise, sets only the database.
- if self.extra_user_roles:
- self._update_relation_data(
- event.relation.id,
- {
- "database": self.database,
- "extra-user-roles": self.extra_user_roles,
- },
- )
- else:
- self._update_relation_data(event.relation.id, {"database": self.database})
-
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- """Event emitted when the database relation has changed."""
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Check if the database is created
- # (the database charm shared the credentials).
- if "username" in diff.added and "password" in diff.added:
- # Emit the default event (the one without an alias).
- logger.info("database created at %s", datetime.now())
- self.on.database_created.emit(event.relation, app=event.app, unit=event.unit)
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "database_created")
-
- # To avoid unnecessary application restarts do not trigger
-            # `endpoints_changed` event if `database_created` is triggered.
- return
-
- # Emit an endpoints changed event if the database
- # added or changed this info in the relation databag.
- if "endpoints" in diff.added or "endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("endpoints changed on %s", datetime.now())
- self.on.endpoints_changed.emit(event.relation, app=event.app, unit=event.unit)
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "endpoints_changed")
-
- # To avoid unnecessary application restarts do not trigger
-            # `read_only_endpoints_changed` event if `endpoints_changed` is triggered.
- return
-
- # Emit a read only endpoints changed event if the database
- # added or changed this info in the relation databag.
- if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("read-only-endpoints changed on %s", datetime.now())
- self.on.read_only_endpoints_changed.emit(
- event.relation, app=event.app, unit=event.unit
- )
-
- # Emit the aliased event (if any).
- self._emit_aliased_event(event, "read_only_endpoints_changed")
-
-
-# Kafka related events
-
-
-class KafkaProvidesEvent(RelationEvent):
- """Base class for Kafka events."""
-
- @property
- def topic(self) -> Optional[str]:
- """Returns the topic that was requested."""
- return self.relation.data[self.relation.app].get("topic")
-
-
-class TopicRequestedEvent(KafkaProvidesEvent, ExtraRoleEvent):
- """Event emitted when a new topic is requested for use on this relation."""
-
-
-class KafkaProvidesEvents(CharmEvents):
- """Kafka events.
-
- This class defines the events that the Kafka can emit.
- """
-
- topic_requested = EventSource(TopicRequestedEvent)
-
-
-class KafkaRequiresEvent(RelationEvent):
- """Base class for Kafka events."""
-
- @property
- def bootstrap_server(self) -> Optional[str]:
- """Returns a a comma-seperated list of broker uris."""
- return self.relation.data[self.relation.app].get("endpoints")
-
- @property
- def consumer_group_prefix(self) -> Optional[str]:
- """Returns the consumer-group-prefix."""
- return self.relation.data[self.relation.app].get("consumer-group-prefix")
-
- @property
- def zookeeper_uris(self) -> Optional[str]:
- """Returns a comma separated list of Zookeeper uris."""
- return self.relation.data[self.relation.app].get("zookeeper-uris")
-
-
-class TopicCreatedEvent(AuthenticationEvent, KafkaRequiresEvent):
- """Event emitted when a new topic is created for use on this relation."""
-
-
-class BootstrapServerChangedEvent(AuthenticationEvent, KafkaRequiresEvent):
- """Event emitted when the bootstrap server is changed."""
-
-
-class KafkaRequiresEvents(CharmEvents):
- """Kafka events.
-
- This class defines the events that the Kafka can emit.
- """
-
- topic_created = EventSource(TopicCreatedEvent)
- bootstrap_server_changed = EventSource(BootstrapServerChangedEvent)
-
-
-# Kafka Provides and Requires
-
-
-class KafkaProvides(DataProvides):
- """Provider-side of the Kafka relation."""
-
- on = KafkaProvidesEvents()
-
- def __init__(self, charm: CharmBase, relation_name: str) -> None:
- super().__init__(charm, relation_name)
-
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- """Event emitted when the relation has changed."""
- # Only the leader should handle this event.
- if not self.local_unit.is_leader():
- return
-
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Emit a topic requested event if the setup key (topic name and optional
- # extra user roles) was added to the relation databag by the application.
- if "topic" in diff.added:
- self.on.topic_requested.emit(event.relation, app=event.app, unit=event.unit)
-
- def set_bootstrap_server(self, relation_id: int, bootstrap_server: str) -> None:
- """Set the bootstrap server in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- bootstrap_server: the bootstrap server address.
- """
- self._update_relation_data(relation_id, {"endpoints": bootstrap_server})
-
- def set_consumer_group_prefix(self, relation_id: int, consumer_group_prefix: str) -> None:
- """Set the consumer group prefix in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
- consumer_group_prefix: the consumer group prefix string.
- """
- self._update_relation_data(relation_id, {"consumer-group-prefix": consumer_group_prefix})
-
- def set_zookeeper_uris(self, relation_id: int, zookeeper_uris: str) -> None:
- """Set the zookeeper uris in the application relation databag.
-
- Args:
- relation_id: the identifier for a particular relation.
-            zookeeper_uris: comma-separated list of ZooKeeper server URIs.
- """
- self._update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris})
-
-
-class KafkaRequires(DataRequires):
- """Requires-side of the Kafka relation."""
-
- on = KafkaRequiresEvents()
-
- def __init__(self, charm, relation_name: str, topic: str, extra_user_roles: str = None):
- """Manager of Kafka client relations."""
- # super().__init__(charm, relation_name)
- super().__init__(charm, relation_name, extra_user_roles)
- self.charm = charm
- self.topic = topic
-
- def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None:
- """Event emitted when the application joins the Kafka relation."""
- # Sets both topic and extra user roles in the relation
- # if the roles are provided. Otherwise, sets only the topic.
- self._update_relation_data(
- event.relation.id,
- {
- "topic": self.topic,
- "extra-user-roles": self.extra_user_roles,
- }
- if self.extra_user_roles is not None
- else {"topic": self.topic},
- )
-
- def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
- """Event emitted when the Kafka relation has changed."""
- # Check which data has changed to emit customs events.
- diff = self._diff(event)
-
- # Check if the topic is created
- # (the Kafka charm shared the credentials).
- if "username" in diff.added and "password" in diff.added:
- # Emit the default event (the one without an alias).
- logger.info("topic created at %s", datetime.now())
- self.on.topic_created.emit(event.relation, app=event.app, unit=event.unit)
-
- # To avoid unnecessary application restarts do not trigger
-            # `endpoints_changed` event if `topic_created` is triggered.
- return
-
-        # Emit an endpoints (bootstrap-server) changed event if the Kafka charm
-        # added or changed this info in the relation databag.
- if "endpoints" in diff.added or "endpoints" in diff.changed:
- # Emit the default event (the one without an alias).
- logger.info("endpoints changed on %s", datetime.now())
- self.on.bootstrap_server_changed.emit(
- event.relation, app=event.app, unit=event.unit
- ) # here check if this is the right design
- return
diff --git a/installers/charm/osm-ro/lib/charms/kafka_k8s/v0/kafka.py b/installers/charm/osm-ro/lib/charms/kafka_k8s/v0/kafka.py
deleted file mode 100644
index aeb5edcb..00000000
--- a/installers/charm/osm-ro/lib/charms/kafka_k8s/v0/kafka.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Kafka library.
-
-This [library](https://juju.is/docs/sdk/libraries) implements both sides of the
-`kafka` [interface](https://juju.is/docs/sdk/relations).
-
-The *provider* side of this interface is implemented by the
-[kafka-k8s Charmed Operator](https://charmhub.io/kafka-k8s).
-
-Any Charmed Operator that *requires* Kafka for providing its
-service should implement the *requirer* side of this interface.
-
-In a nutshell, using this library to implement a Charmed Operator *requiring*
-Kafka would look like
-
-```
-$ charmcraft fetch-lib charms.kafka_k8s.v0.kafka
-```
-
-`metadata.yaml`:
-
-```
-requires:
- kafka:
- interface: kafka
- limit: 1
-```
-
-`src/charm.py`:
-
-```
-from charms.kafka_k8s.v0.kafka import KafkaEvents, KafkaRequires
-from ops.charm import CharmBase
-
-
-class MyCharm(CharmBase):
-
- on = KafkaEvents()
-
- def __init__(self, *args):
- super().__init__(*args)
- self.kafka = KafkaRequires(self)
- self.framework.observe(
- self.on.kafka_available,
- self._on_kafka_available,
- )
- self.framework.observe(
- self.on["kafka"].relation_broken,
- self._on_kafka_broken,
- )
-
- def _on_kafka_available(self, event):
- # Get Kafka host and port
- host: str = self.kafka.host
- port: int = self.kafka.port
- # host => "kafka-k8s"
- # port => 9092
-
- def _on_kafka_broken(self, event):
- # Stop service
- # ...
- self.unit.status = BlockedStatus("need kafka relation")
-```
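-
-For the *provider* side, the following is a minimal sketch of publishing the
-connection details (illustrative only, not the actual kafka-k8s implementation;
-it assumes the charm declares `provides: kafka:` with `interface: kafka` in its
-`metadata.yaml`, and the host and port values are placeholders):
-
-```
-from charms.kafka_k8s.v0.kafka import KafkaProvides
-from ops.charm import CharmBase
-
-
-class MyKafkaCharm(CharmBase):
-
-    def __init__(self, *args):
-        super().__init__(*args)
-        self.kafka_provider = KafkaProvides(self)
-        self.framework.observe(self.on.config_changed, self._on_config_changed)
-
-    def _on_config_changed(self, event):
-        # set_host_info writes the application databag, so only the leader may call it.
-        if self.unit.is_leader():
-            self.kafka_provider.set_host_info("kafka-k8s", 9092)
-```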
-
-You can file bugs
-[here](https://github.com/charmed-osm/kafka-k8s-operator/issues)!
-"""
-
-from typing import Optional
-
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, Object
-
-# The unique Charmhub library identifier, never change it
-from ops.model import Relation
-
-LIBID = "eacc8c85082347c9aae740e0220b8376"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 4
-
-
-KAFKA_HOST_APP_KEY = "host"
-KAFKA_PORT_APP_KEY = "port"
-
-
-class _KafkaAvailableEvent(EventBase):
- """Event emitted when Kafka is available."""
-
-
-class KafkaEvents(CharmEvents):
- """Kafka events.
-
- This class defines the events that Kafka can emit.
-
- Events:
- kafka_available (_KafkaAvailableEvent)
- """
-
- kafka_available = EventSource(_KafkaAvailableEvent)
-
-
-class KafkaRequires(Object):
- """Requires-side of the Kafka relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "kafka") -> None:
- super().__init__(charm, endpoint_name)
- self.charm = charm
- self._endpoint_name = endpoint_name
-
- # Observe relation events
- event_observe_mapping = {
- charm.on[self._endpoint_name].relation_changed: self._on_relation_changed,
- }
- for event, observer in event_observe_mapping.items():
- self.framework.observe(event, observer)
-
- def _on_relation_changed(self, event) -> None:
- if event.relation.app and all(
- key in event.relation.data[event.relation.app]
- for key in (KAFKA_HOST_APP_KEY, KAFKA_PORT_APP_KEY)
- ):
- self.charm.on.kafka_available.emit()
-
- @property
-    def host(self) -> Optional[str]:
- """Get kafka hostname."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- relation.data[relation.app].get(KAFKA_HOST_APP_KEY)
- if relation and relation.app
- else None
- )
-
- @property
-    def port(self) -> Optional[int]:
- """Get kafka port number."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- int(relation.data[relation.app].get(KAFKA_PORT_APP_KEY))
- if relation and relation.app
- else None
- )
-
-
-class KafkaProvides(Object):
- """Provides-side of the Kafka relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "kafka") -> None:
- super().__init__(charm, endpoint_name)
- self._endpoint_name = endpoint_name
-
- def set_host_info(self, host: str, port: int, relation: Optional[Relation] = None) -> None:
- """Set Kafka host and port.
-
- This function writes in the application data of the relation, therefore,
- only the unit leader can call it.
-
- Args:
- host (str): Kafka hostname or IP address.
- port (int): Kafka port.
- relation (Optional[Relation]): Relation to update.
- If not specified, all relations will be updated.
-
- Raises:
- Exception: if a non-leader unit calls this function.
- """
- if not self.model.unit.is_leader():
- raise Exception("only the leader set host information.")
-
- if relation:
- self._update_relation_data(host, port, relation)
- return
-
- for relation in self.model.relations[self._endpoint_name]:
- self._update_relation_data(host, port, relation)
-
- def _update_relation_data(self, host: str, port: int, relation: Relation) -> None:
- """Update data in relation if needed."""
- relation.data[self.model.app][KAFKA_HOST_APP_KEY] = host
- relation.data[self.model.app][KAFKA_PORT_APP_KEY] = str(port)
diff --git a/installers/charm/osm-ro/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/installers/charm/osm-ro/lib/charms/observability_libs/v1/kubernetes_service_patch.py
deleted file mode 100644
index 506dbf03..00000000
--- a/installers/charm/osm-ro/lib/charms/observability_libs/v1/kubernetes_service_patch.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-
-"""# KubernetesServicePatch Library.
-
-This library is designed to enable developers to more simply patch the Kubernetes Service created
-by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a
-service named after the application in the namespace (named after the Juju model). This service by
-default contains a "placeholder" port, which is 65535/TCP.
-
-When modifying the default set of resources managed by Juju, one must consider the lifecycle of the
-charm. In this case, any modifications to the default service (created during deployment) will be
-overwritten during a charm upgrade.
-
-When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm`
-events which applies the patch to the cluster. This should ensure that the service ports are
-correct throughout the charm's life.
-
-The constructor simply takes a reference to the parent charm, and a list of
-[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the
-service. For information regarding the `lightkube` `ServicePort` model, please visit the
-`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport).
-
-Optionally, a name of the service (in case service name needs to be patched as well), labels,
-selectors, and annotations can be provided as keyword arguments.
-
-## Getting Started
-
-To get started using the library, you just need to fetch the library using `charmcraft`. **Note
-that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.**
-
-```shell
-cd some-charm
-charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch
-echo <<-EOF >> requirements.txt
-lightkube
-lightkube-models
-EOF
-```
-
-Then, to initialise the library:
-
-For `ClusterIP` services:
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- port = ServicePort(443, name=f"{self.app.name}")
- self.service_patcher = KubernetesServicePatch(self, [port])
- # ...
-```
-
-For `LoadBalancer`/`NodePort` services:
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666)
- self.service_patcher = KubernetesServicePatch(
-            self, [port], service_type="LoadBalancer"
- )
- # ...
-```
-
-Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"`
-
-```python
-# ...
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from lightkube.models.core_v1 import ServicePort
-
-class SomeCharm(CharmBase):
- def __init__(self, *args):
- # ...
- tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP")
- udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP")
- sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP")
- self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp])
- # ...
-```
-
-Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library
-does not try to make any API calls, or open any files during testing that are unlikely to be
-present, and could break your tests. The easiest way to do this is during your test `setUp`:
-
-```python
-# ...
-
-@patch("charm.KubernetesServicePatch", lambda x, y: None)
-def setUp(self, *unused):
- self.harness = Harness(SomeCharm)
- # ...
-```
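-
-If you need to verify whether the patch has been applied (for example from a test or an
-action handler), the library also exposes an `is_patched` method. A minimal sketch (the
-charm and handler names are illustrative):
-
-```python
-# ...
-class SomeCharm(CharmBase):
-    def _on_check_patch_action(self, event):
-        event.set_results({"patched": self.service_patcher.is_patched()})
-```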
-"""
-
-import logging
-from types import MethodType
-from typing import List, Literal
-
-from lightkube import ApiError, Client
-from lightkube.models.core_v1 import ServicePort, ServiceSpec
-from lightkube.models.meta_v1 import ObjectMeta
-from lightkube.resources.core_v1 import Service
-from lightkube.types import PatchType
-from ops.charm import CharmBase
-from ops.framework import Object
-
-logger = logging.getLogger(__name__)
-
-# The unique Charmhub library identifier, never change it
-LIBID = "0042f86d0a874435adef581806cddbbb"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 1
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 1
-
-ServiceType = Literal["ClusterIP", "LoadBalancer"]
-
-
-class KubernetesServicePatch(Object):
- """A utility for patching the Kubernetes service set up by Juju."""
-
- def __init__(
- self,
- charm: CharmBase,
- ports: List[ServicePort],
- service_name: str = None,
- service_type: ServiceType = "ClusterIP",
- additional_labels: dict = None,
- additional_selectors: dict = None,
- additional_annotations: dict = None,
- ):
- """Constructor for KubernetesServicePatch.
-
- Args:
- charm: the charm that is instantiating the library.
- ports: a list of ServicePorts
-            service_name: allows setting a custom name for the patched service. If none given,
- application name will be used.
- service_type: desired type of K8s service. Default value is in line with ServiceSpec's
- default value.
- additional_labels: Labels to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_selectors: Selectors to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_annotations: Annotations to be added to the kubernetes service.
- """
- super().__init__(charm, "kubernetes-service-patch")
- self.charm = charm
- self.service_name = service_name if service_name else self._app
- self.service = self._service_object(
- ports,
- service_name,
- service_type,
- additional_labels,
- additional_selectors,
- additional_annotations,
- )
-
- # Make mypy type checking happy that self._patch is a method
- assert isinstance(self._patch, MethodType)
- # Ensure this patch is applied during the 'install' and 'upgrade-charm' events
- self.framework.observe(charm.on.install, self._patch)
- self.framework.observe(charm.on.upgrade_charm, self._patch)
-
- def _service_object(
- self,
- ports: List[ServicePort],
- service_name: str = None,
- service_type: ServiceType = "ClusterIP",
- additional_labels: dict = None,
- additional_selectors: dict = None,
- additional_annotations: dict = None,
- ) -> Service:
- """Creates a valid Service representation.
-
- Args:
- ports: a list of ServicePorts
-            service_name: allows setting a custom name for the patched service. If none given,
- application name will be used.
- service_type: desired type of K8s service. Default value is in line with ServiceSpec's
- default value.
- additional_labels: Labels to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_selectors: Selectors to be added to the kubernetes service (by default only
- "app.kubernetes.io/name" is set to the service name)
- additional_annotations: Annotations to be added to the kubernetes service.
-
- Returns:
- Service: A valid representation of a Kubernetes Service with the correct ports.
- """
- if not service_name:
- service_name = self._app
- labels = {"app.kubernetes.io/name": self._app}
- if additional_labels:
- labels.update(additional_labels)
- selector = {"app.kubernetes.io/name": self._app}
- if additional_selectors:
- selector.update(additional_selectors)
- return Service(
- apiVersion="v1",
- kind="Service",
- metadata=ObjectMeta(
- namespace=self._namespace,
- name=service_name,
- labels=labels,
- annotations=additional_annotations, # type: ignore[arg-type]
- ),
- spec=ServiceSpec(
- selector=selector,
- ports=ports,
- type=service_type,
- ),
- )
-
- def _patch(self, _) -> None:
- """Patch the Kubernetes service created by Juju to map the correct port.
-
- Raises:
- PatchFailed: if patching fails due to lack of permissions, or otherwise.
- """
- if not self.charm.unit.is_leader():
- return
-
- client = Client()
- try:
- if self.service_name != self._app:
- self._delete_and_create_service(client)
- client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE)
- except ApiError as e:
- if e.status.code == 403:
- logger.error("Kubernetes service patch failed: `juju trust` this application.")
- else:
- logger.error("Kubernetes service patch failed: %s", str(e))
- else:
- logger.info("Kubernetes service '%s' patched successfully", self._app)
-
- def _delete_and_create_service(self, client: Client):
- service = client.get(Service, self._app, namespace=self._namespace)
- service.metadata.name = self.service_name # type: ignore[attr-defined]
- service.metadata.resourceVersion = service.metadata.uid = None # type: ignore[attr-defined] # noqa: E501
- client.delete(Service, self._app, namespace=self._namespace)
- client.create(service)
-
- def is_patched(self) -> bool:
- """Reports if the service patch has been applied.
-
- Returns:
- bool: A boolean indicating if the service patch has been applied.
- """
- client = Client()
- # Get the relevant service from the cluster
- service = client.get(Service, name=self.service_name, namespace=self._namespace)
- # Construct a list of expected ports, should the patch be applied
- expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]
- # Construct a list in the same manner, using the fetched service
- fetched_ports = [(p.port, p.targetPort) for p in service.spec.ports] # type: ignore[attr-defined] # noqa: E501
- return expected_ports == fetched_ports
-
- @property
- def _app(self) -> str:
- """Name of the current Juju application.
-
- Returns:
- str: A string containing the name of the current Juju application.
- """
- return self.charm.app.name
-
- @property
- def _namespace(self) -> str:
- """The Kubernetes namespace we're running in.
-
- Returns:
- str: A string containing the name of the current Kubernetes namespace.
- """
- with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
- return f.read().strip()
diff --git a/installers/charm/osm-ro/lib/charms/osm_libs/v0/utils.py b/installers/charm/osm-ro/lib/charms/osm_libs/v0/utils.py
deleted file mode 100644
index d739ba68..00000000
--- a/installers/charm/osm-ro/lib/charms/osm_libs/v0/utils.py
+++ /dev/null
@@ -1,544 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-# http://www.apache.org/licenses/LICENSE-2.0
-"""OSM Utils Library.
-
-This library offers some utilities made for but not limited to Charmed OSM.
-
-# Getting started
-
-Execute the following command inside your Charmed Operator folder to fetch the library.
-
-```shell
-charmcraft fetch-lib charms.osm_libs.v0.utils
-```
-
-# CharmError Exception
-
-An exception that takes two arguments, the message and the StatusBase class, which are used
-to set the status of the charm when the exception is raised.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import CharmError
-
-class MyCharm(CharmBase):
- def _on_config_changed(self, _):
- try:
- if not self.config.get("some-option"):
- raise CharmError("need some-option", BlockedStatus)
-
- if not self.mysql_ready:
- raise CharmError("waiting for mysql", WaitingStatus)
-
- # Do stuff...
-
-        except CharmError as e:
- self.unit.status = e.status
-```
-
-# Pebble validations
-
-The `check_container_ready` function checks that a container is ready,
-and therefore Pebble is ready.
-
-The `check_service_active` function checks that a service in a container is running.
-
-Both functions raise a CharmError if the validations fail.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import check_container_ready, check_service_active
-
-class MyCharm(CharmBase):
- def _on_config_changed(self, _):
- try:
- container: Container = self.unit.get_container("my-container")
- check_container_ready(container)
- check_service_active(container, "my-service")
- # Do stuff...
-
-        except CharmError as e:
- self.unit.status = e.status
-```
-
-# Debug-mode
-
-The debug-mode allows OSM developers to easily debug OSM modules.
-
-Example:
-```python
-from charms.osm_libs.v0.utils import DebugMode
-
-class MyCharm(CharmBase):
- _stored = StoredState()
-
- def __init__(self, _):
- # ...
- container: Container = self.unit.get_container("my-container")
- hostpaths = [
- HostPath(
- config="module-hostpath",
- container_path="/usr/lib/python3/dist-packages/module"
- ),
- ]
- vscode_workspace_path = "files/vscode-workspace.json"
- self.debug_mode = DebugMode(
- self,
- self._stored,
- container,
- hostpaths,
- vscode_workspace_path,
- )
-
- def _on_update_status(self, _):
- if self.debug_mode.started:
- return
- # ...
-
- def _get_debug_mode_information(self):
- command = self.debug_mode.command
- password = self.debug_mode.password
- return command, password
-```
-
-# More
-
-- Get pod IP with `get_pod_ip()`
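-
-For example, a charm that needs to publish its own address could do the following
-(an illustrative sketch):
-
-```python
-from charms.osm_libs.v0.utils import get_pod_ip
-
-class MyCharm(CharmBase):
-    def _on_config_changed(self, _):
-        pod_ip = get_pod_ip()  # e.g. "10.1.33.12"
-        # Do stuff with the pod IP...
-```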
-"""
-from dataclasses import dataclass
-import logging
-import secrets
-import socket
-from pathlib import Path
-from typing import List
-
-from lightkube import Client
-from lightkube.models.core_v1 import HostPathVolumeSource, Volume, VolumeMount
-from lightkube.resources.apps_v1 import StatefulSet
-from ops.charm import CharmBase
-from ops.framework import Object, StoredState
-from ops.model import (
- ActiveStatus,
- BlockedStatus,
- Container,
- MaintenanceStatus,
- StatusBase,
- WaitingStatus,
-)
-from ops.pebble import ServiceStatus
-
-# The unique Charmhub library identifier, never change it
-LIBID = "e915908eebee4cdd972d484728adf984"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 5
-
-logger = logging.getLogger(__name__)
-
-
-class CharmError(Exception):
- """Charm Error Exception."""
-
- def __init__(self, message: str, status_class: StatusBase = BlockedStatus) -> None:
- self.message = message
- self.status_class = status_class
- self.status = status_class(message)
-
-
-def check_container_ready(container: Container) -> None:
- """Check Pebble has started in the container.
-
- Args:
- container (Container): Container to be checked.
-
- Raises:
- CharmError: if container is not ready.
- """
- if not container.can_connect():
- raise CharmError("waiting for pebble to start", MaintenanceStatus)
-
-
-def check_service_active(container: Container, service_name: str) -> None:
- """Check if the service is running.
-
- Args:
- container (Container): Container to be checked.
- service_name (str): Name of the service to check.
-
- Raises:
- CharmError: if the service is not running.
- """
- if service_name not in container.get_plan().services:
- raise CharmError(f"{service_name} service not configured yet", WaitingStatus)
-
- if container.get_service(service_name).current != ServiceStatus.ACTIVE:
- raise CharmError(f"{service_name} service is not running")
-
-
-def get_pod_ip() -> str:
- """Get Kubernetes Pod IP.
-
- Returns:
- str: The IP of the Pod.
- """
- return socket.gethostbyname(socket.gethostname())
-
-
-_DEBUG_SCRIPT = r"""#!/bin/bash
-# Install SSH
-
-function download_code(){{
- wget https://go.microsoft.com/fwlink/?LinkID=760868 -O code.deb
-}}
-
-function setup_envs(){{
- grep "source /debug.envs" /root/.bashrc || echo "source /debug.envs" | tee -a /root/.bashrc
-}}
-function setup_ssh(){{
- apt install ssh -y
- cat /etc/ssh/sshd_config |
-        grep -E '^PermitRootLogin yes$' || (
- echo PermitRootLogin yes |
- tee -a /etc/ssh/sshd_config
- )
- service ssh stop
- sleep 3
- service ssh start
- usermod --password $(echo {} | openssl passwd -1 -stdin) root
-}}
-
-function setup_code(){{
- apt install libasound2 -y
- (dpkg -i code.deb || apt-get install -f -y || apt-get install -f -y) && echo Code installed successfully
- code --install-extension ms-python.python --user-data-dir /root
- mkdir -p /root/.vscode-server
- cp -R /root/.vscode/extensions /root/.vscode-server/extensions
-}}
-
-export DEBIAN_FRONTEND=noninteractive
-apt update && apt install wget -y
-download_code &
-setup_ssh &
-setup_envs
-wait
-setup_code &
-wait
-"""
-
-
-@dataclass
-class SubModule:
- """Represent RO Submodules."""
- sub_module_path: str
- container_path: str
-
-
-class HostPath:
- """Represents a hostpath."""
- def __init__(self, config: str, container_path: str, submodules: dict = None) -> None:
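-        # The mount path is derived from the config option name: for example, the
-        # "ro-hostpath" option is mounted at "/hostpath/ro".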
- mount_path_items = config.split("-")
- mount_path_items.reverse()
- self.mount_path = "/" + "/".join(mount_path_items)
- self.config = config
- self.sub_module_dict = {}
- if submodules:
- for submodule in submodules.keys():
- self.sub_module_dict[submodule] = SubModule(
- sub_module_path=self.mount_path + "/" + submodule + "/" + submodules[submodule].split("/")[-1],
- container_path=submodules[submodule],
- )
- else:
- self.container_path = container_path
- self.module_name = container_path.split("/")[-1]
-
-class DebugMode(Object):
- """Class to handle the debug-mode."""
-
- def __init__(
- self,
- charm: CharmBase,
- stored: StoredState,
- container: Container,
- hostpaths: List[HostPath] = [],
- vscode_workspace_path: str = "files/vscode-workspace.json",
- ) -> None:
- super().__init__(charm, "debug-mode")
-
- self.charm = charm
- self._stored = stored
- self.hostpaths = hostpaths
- self.vscode_workspace = Path(vscode_workspace_path).read_text()
- self.container = container
-
- self._stored.set_default(
- debug_mode_started=False,
- debug_mode_vscode_command=None,
- debug_mode_password=None,
- )
-
- self.framework.observe(self.charm.on.config_changed, self._on_config_changed)
- self.framework.observe(self.charm.on[container.name].pebble_ready, self._on_config_changed)
- self.framework.observe(self.charm.on.update_status, self._on_update_status)
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- if not self.charm.unit.is_leader():
- return
-
- debug_mode_enabled = self.charm.config.get("debug-mode", False)
- action = self.enable if debug_mode_enabled else self.disable
- action()
-
- def _on_update_status(self, _) -> None:
- """Handler for the update-status event."""
- if not self.charm.unit.is_leader() or not self.started:
- return
-
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- @property
- def started(self) -> bool:
- """Indicates whether the debug-mode has started or not."""
- return self._stored.debug_mode_started
-
- @property
- def command(self) -> str:
- """Command to launch vscode."""
- return self._stored.debug_mode_vscode_command
-
- @property
- def password(self) -> str:
- """SSH password."""
- return self._stored.debug_mode_password
-
- def enable(self, service_name: str = None) -> None:
- """Enable debug-mode.
-
- This function mounts hostpaths of the OSM modules (if set), and
- configures the container so it can be easily debugged. The setup
- includes the configuration of SSH, environment variables, and
- VSCode workspace and plugins.
-
- Args:
- service_name (str, optional): Pebble service name which has the desired environment
- variables. Mandatory if there is more than one Pebble service configured.
- """
- hostpaths_to_reconfigure = self._hostpaths_to_reconfigure()
- if self.started and not hostpaths_to_reconfigure:
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
- return
-
- logger.debug("enabling debug-mode")
-
- # Mount hostpaths if set.
- # If hostpaths are mounted, the statefulset will be restarted,
- # and for that reason we return immediately. On restart, the hostpaths
- # won't be mounted and then we can continue and setup the debug-mode.
- if hostpaths_to_reconfigure:
- self.charm.unit.status = MaintenanceStatus("debug-mode: configuring hostpaths")
- self._configure_hostpaths(hostpaths_to_reconfigure)
- return
-
- self.charm.unit.status = MaintenanceStatus("debug-mode: starting")
- password = secrets.token_hex(8)
- self._setup_debug_mode(
- password,
- service_name,
- mounted_hostpaths=[hp for hp in self.hostpaths if self.charm.config.get(hp.config)],
- )
-
- self._stored.debug_mode_vscode_command = self._get_vscode_command(get_pod_ip())
- self._stored.debug_mode_password = password
- self._stored.debug_mode_started = True
- logger.info("debug-mode is ready")
- self.charm.unit.status = ActiveStatus("debug-mode: ready")
-
- def disable(self) -> None:
- """Disable debug-mode."""
- logger.debug("disabling debug-mode")
- current_status = self.charm.unit.status
- hostpaths_unmounted = self._unmount_hostpaths()
-
- if not self._stored.debug_mode_started:
- return
- self._stored.debug_mode_started = False
- self._stored.debug_mode_vscode_command = None
- self._stored.debug_mode_password = None
-
- if not hostpaths_unmounted:
- self.charm.unit.status = current_status
- self._restart()
-
- def _hostpaths_to_reconfigure(self) -> List[HostPath]:
- hostpaths_to_reconfigure: List[HostPath] = []
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
- volumes = statefulset.spec.template.spec.volumes
-
- for hostpath in self.hostpaths:
-            hostpath_is_set = bool(self.charm.config.get(hostpath.config))
- hostpath_already_configured = next(
- (True for volume in volumes if volume.name == hostpath.config), False
- )
- if hostpath_is_set != hostpath_already_configured:
- hostpaths_to_reconfigure.append(hostpath)
-
- return hostpaths_to_reconfigure
-
- def _setup_debug_mode(
- self,
- password: str,
- service_name: str = None,
- mounted_hostpaths: List[HostPath] = [],
- ) -> None:
- services = self.container.get_plan().services
- if not service_name and len(services) != 1:
- raise Exception("Cannot start debug-mode: please set the service_name")
-
- service = None
- if not service_name:
- service_name, service = services.popitem()
- if not service:
- service = services.get(service_name)
-
- logger.debug(f"getting environment variables from service {service_name}")
- environment = service.environment
- environment_file_content = "\n".join(
- [f'export {key}="{value}"' for key, value in environment.items()]
- )
- logger.debug(f"pushing environment file to {self.container.name} container")
- self.container.push("/debug.envs", environment_file_content)
-
- # Push VSCode workspace
- logger.debug(f"pushing vscode workspace to {self.container.name} container")
- self.container.push("/debug.code-workspace", self.vscode_workspace)
-
- # Execute debugging script
- logger.debug(f"pushing debug-mode setup script to {self.container.name} container")
- self.container.push("/debug.sh", _DEBUG_SCRIPT.format(password), permissions=0o777)
- logger.debug(f"executing debug-mode setup script in {self.container.name} container")
- self.container.exec(["/debug.sh"]).wait_output()
- logger.debug(f"stopping service {service_name} in {self.container.name} container")
- self.container.stop(service_name)
-
- # Add symlinks to mounted hostpaths
- for hostpath in mounted_hostpaths:
- logger.debug(f"adding symlink for {hostpath.config}")
- if len(hostpath.sub_module_dict) > 0:
- for sub_module in hostpath.sub_module_dict.keys():
- self.container.exec(["rm", "-rf", hostpath.sub_module_dict[sub_module].container_path]).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- hostpath.sub_module_dict[sub_module].sub_module_path,
- hostpath.sub_module_dict[sub_module].container_path,
- ]
- )
-
- else:
- self.container.exec(["rm", "-rf", hostpath.container_path]).wait_output()
- self.container.exec(
- [
- "ln",
- "-s",
- f"{hostpath.mount_path}/{hostpath.module_name}",
- hostpath.container_path,
- ]
- )
-
- def _configure_hostpaths(self, hostpaths: List[HostPath]):
- client = Client()
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in hostpaths:
- if self.charm.config.get(hostpath.config):
- self._add_hostpath_to_statefulset(hostpath, statefulset)
- else:
- self._delete_hostpath_from_statefulset(hostpath, statefulset)
-
- client.replace(statefulset)
-
- def _unmount_hostpaths(self) -> bool:
- client = Client()
- hostpath_unmounted = False
- statefulset = client.get(StatefulSet, self.charm.app.name, namespace=self.charm.model.name)
-
- for hostpath in self.hostpaths:
- if self._delete_hostpath_from_statefulset(hostpath, statefulset):
- hostpath_unmounted = True
-
- if hostpath_unmounted:
- client.replace(statefulset)
-
- return hostpath_unmounted
-
- def _add_hostpath_to_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- # Add volume
- logger.debug(f"adding volume {hostpath.config} to {self.charm.app.name} statefulset")
- volume = Volume(
- hostpath.config,
- hostPath=HostPathVolumeSource(
- path=self.charm.config[hostpath.config],
- type="Directory",
- ),
- )
- statefulset.spec.template.spec.volumes.append(volume)
-
- # Add volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
-
- logger.debug(
- f"adding volumeMount {hostpath.config} to {self.container.name} container"
- )
- statefulset_container.volumeMounts.append(
- VolumeMount(mountPath=hostpath.mount_path, name=hostpath.config)
- )
-
- def _delete_hostpath_from_statefulset(self, hostpath: HostPath, statefulset: StatefulSet):
- hostpath_unmounted = False
- for volume in statefulset.spec.template.spec.volumes:
-
- if hostpath.config != volume.name:
- continue
-
- # Remove volumeMount
- for statefulset_container in statefulset.spec.template.spec.containers:
- if statefulset_container.name != self.container.name:
- continue
- for volume_mount in statefulset_container.volumeMounts:
- if volume_mount.name != hostpath.config:
- continue
-
- logger.debug(
- f"removing volumeMount {hostpath.config} from {self.container.name} container"
- )
- statefulset_container.volumeMounts.remove(volume_mount)
-
- # Remove volume
- logger.debug(
- f"removing volume {hostpath.config} from {self.charm.app.name} statefulset"
- )
- statefulset.spec.template.spec.volumes.remove(volume)
-
- hostpath_unmounted = True
- return hostpath_unmounted
-
- def _get_vscode_command(
- self,
- pod_ip: str,
- user: str = "root",
- workspace_path: str = "/debug.code-workspace",
- ) -> str:
- return f"code --remote ssh-remote+{user}@{pod_ip} {workspace_path}"
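-        # Returns e.g. "code --remote ssh-remote+root@10.1.33.12 /debug.code-workspace"
-        # (the pod IP shown is illustrative).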
-
- def _restart(self):
- self.container.exec(["kill", "-HUP", "1"])
diff --git a/installers/charm/osm-ro/lib/charms/osm_ro/v0/ro.py b/installers/charm/osm-ro/lib/charms/osm_ro/v0/ro.py
deleted file mode 100644
index 79bee5e7..00000000
--- a/installers/charm/osm-ro/lib/charms/osm_ro/v0/ro.py
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# Learn more at: https://juju.is/docs/sdk
-
-"""Ro library.
-
-This [library](https://juju.is/docs/sdk/libraries) implements both sides of the
-`ro` [interface](https://juju.is/docs/sdk/relations).
-
-The *provider* side of this interface is implemented by the
-[osm-ro Charmed Operator](https://charmhub.io/osm-ro).
-
-Any Charmed Operator that *requires* RO for providing its
-service should implement the *requirer* side of this interface.
-
-In a nutshell, using this library to implement a Charmed Operator *requiring*
-RO would look like
-
-```
-$ charmcraft fetch-lib charms.osm_ro.v0.ro
-```
-
-`metadata.yaml`:
-
-```
-requires:
- ro:
- interface: ro
- limit: 1
-```
-
-`src/charm.py`:
-
-```
-from charms.osm_ro.v0.ro import RoRequires
-from ops.charm import CharmBase
-from ops.model import BlockedStatus
-
-
-class MyCharm(CharmBase):
-
- def __init__(self, *args):
- super().__init__(*args)
- self.ro = RoRequires(self)
- self.framework.observe(
- self.on["ro"].relation_changed,
- self._on_ro_relation_changed,
- )
- self.framework.observe(
- self.on["ro"].relation_broken,
- self._on_ro_relation_broken,
- )
-
-    def _on_ro_relation_changed(self, event):
-        # Get RO host and port
-        host: str = self.ro.host
-        port: int = self.ro.port
-        # host => "osm-ro"
-        # port => 9999
-
-    def _on_ro_relation_broken(self, event):
-        # Stop service
-        # ...
-        self.unit.status = BlockedStatus("need ro relation")
-```
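-
-The *provider* side is used in a similar way. A minimal sketch (the port number and the
-event wiring below are illustrative assumptions):
-
-```
-from charms.osm_ro.v0.ro import RoProvides
-from ops.charm import CharmBase
-
-
-class OsmRoCharm(CharmBase):
-
-    def __init__(self, *args):
-        super().__init__(*args)
-        self.ro = RoProvides(self)
-        self.framework.observe(
-            self.on["ro"].relation_joined,
-            self._on_ro_relation_joined,
-        )
-
-    def _on_ro_relation_joined(self, event):
-        if self.unit.is_leader():
-            # Publish this application's host and port to the requirer.
-            self.ro.set_host_info(self.app.name, 9090, event.relation)
-```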
-
-You can file bugs
-[here](https://osm.etsi.org/bugzilla/enter_bug.cgi), selecting the `devops` module!
-"""
-from typing import Optional
-
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, Object
-from ops.model import Relation
-
-
-# The unique Charmhub library identifier, never change it
-LIBID = "a34c3331a43f4f6db2b1499ff4d1390d"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 1
-
-RO_HOST_APP_KEY = "host"
-RO_PORT_APP_KEY = "port"
-
-
-class RoRequires(Object): # pragma: no cover
- """Requires-side of the Ro relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "ro") -> None:
- super().__init__(charm, endpoint_name)
- self.charm = charm
- self._endpoint_name = endpoint_name
-
- @property
- def host(self) -> str:
- """Get ro hostname."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- relation.data[relation.app].get(RO_HOST_APP_KEY)
- if relation and relation.app
- else None
- )
-
- @property
- def port(self) -> int:
- """Get ro port number."""
- relation: Relation = self.model.get_relation(self._endpoint_name)
- return (
- int(relation.data[relation.app].get(RO_PORT_APP_KEY))
- if relation and relation.app
- else None
- )
-
-
-class RoProvides(Object):
- """Provides-side of the Ro relation."""
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "ro") -> None:
- super().__init__(charm, endpoint_name)
- self._endpoint_name = endpoint_name
-
- def set_host_info(self, host: str, port: int, relation: Optional[Relation] = None) -> None:
- """Set Ro host and port.
-
-        This function writes to the application data of the relation; therefore,
-        only the unit leader can call it.
-
- Args:
- host (str): Ro hostname or IP address.
- port (int): Ro port.
- relation (Optional[Relation]): Relation to update.
- If not specified, all relations will be updated.
-
- Raises:
- Exception: if a non-leader unit calls this function.
- """
- if not self.model.unit.is_leader():
-            raise Exception("only the leader can set host information.")
-
- if relation:
- self._update_relation_data(host, port, relation)
- return
-
- for relation in self.model.relations[self._endpoint_name]:
- self._update_relation_data(host, port, relation)
-
- def _update_relation_data(self, host: str, port: int, relation: Relation) -> None:
- """Update data in relation if needed."""
- relation.data[self.model.app][RO_HOST_APP_KEY] = host
- relation.data[self.model.app][RO_PORT_APP_KEY] = str(port)
diff --git a/installers/charm/osm-ro/metadata.yaml b/installers/charm/osm-ro/metadata.yaml
deleted file mode 100644
index a94036ac..00000000
--- a/installers/charm/osm-ro/metadata.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# This file populates the Overview on Charmhub.
-# See https://juju.is/docs/some-url-to-be-determined/ for a checklist and guidance.
-
-name: osm-ro
-
-# The following metadata are human-readable and will be published prominently on Charmhub.
-
-display-name: OSM RO
-
-summary: OSM Resource Orchestrator (RO)
-
-description: |
- A Kubernetes operator that deploys the Resource Orchestrator of OSM.
-
-  The Resource Orchestrator module's main responsibility is managing
-  VIM and SDN operations, taking orders from the LCM over the Kafka
-  message queue.
-
- This charm doesn't make sense on its own.
- See more:
- - https://charmhub.io/osm
-
-containers:
- ro:
- resource: ro-image
-
-# This file populates the Resources tab on Charmhub.
-
-resources:
- ro-image:
- type: oci-image
- description: OCI image for ro
- upstream-source: opensourcemano/ro
-
-requires:
- kafka:
- interface: kafka
- limit: 1
- mongodb:
- interface: mongodb_client
- limit: 1
-
-provides:
- ro:
- interface: ro
diff --git a/installers/charm/osm-ro/pyproject.toml b/installers/charm/osm-ro/pyproject.toml
deleted file mode 100644
index 16cf0f4b..00000000
--- a/installers/charm/osm-ro/pyproject.toml
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-# Testing tools configuration
-[tool.coverage.run]
-branch = true
-
-[tool.coverage.report]
-show_missing = true
-
-[tool.pytest.ini_options]
-minversion = "6.0"
-log_cli_level = "INFO"
-
-# Formatting tools configuration
-[tool.black]
-line-length = 99
-target-version = ["py38"]
-
-[tool.isort]
-profile = "black"
-
-# Linting tools configuration
-[tool.flake8]
-max-line-length = 99
-max-doc-length = 99
-max-complexity = 10
-exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
-select = ["E", "W", "F", "C", "N", "R", "D", "H"]
-# Ignore W503, E501 because using black creates errors with this
-# Ignore D107 Missing docstring in __init__
-ignore = ["W503", "E501", "D107"]
-# D100, D101, D102, D103: Ignore missing docstrings in tests
-per-file-ignores = ["tests/*:D100,D101,D102,D103,D104"]
-docstring-convention = "google"
diff --git a/installers/charm/osm-ro/requirements.txt b/installers/charm/osm-ro/requirements.txt
deleted file mode 100644
index 398d4ad3..00000000
--- a/installers/charm/osm-ro/requirements.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-ops < 2.2
-lightkube
-lightkube-models
-# git+https://github.com/charmed-osm/config-validator/
diff --git a/installers/charm/osm-ro/src/charm.py b/installers/charm/osm-ro/src/charm.py
deleted file mode 100755
index 89da4f12..00000000
--- a/installers/charm/osm-ro/src/charm.py
+++ /dev/null
@@ -1,338 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-#
-# Learn more at: https://juju.is/docs/sdk
-
-"""OSM RO charm.
-
-See more: https://charmhub.io/osm
-"""
-
-import base64
-import logging
-from typing import Any, Dict
-
-from charms.data_platform_libs.v0.data_interfaces import DatabaseRequires
-from charms.kafka_k8s.v0.kafka import KafkaEvents, KafkaRequires
-from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
-from charms.osm_libs.v0.utils import (
- CharmError,
- DebugMode,
- HostPath,
- check_container_ready,
- check_service_active,
-)
-from charms.osm_ro.v0.ro import RoProvides
-from lightkube.models.core_v1 import ServicePort
-from ops.charm import ActionEvent, CharmBase, RelationJoinedEvent
-from ops.framework import StoredState
-from ops.main import main
-from ops.model import ActiveStatus, Container
-
-ro_host_paths = {
- "NG-RO": "/usr/lib/python3/dist-packages/osm_ng_ro",
- "RO-plugin": "/usr/lib/python3/dist-packages/osm_ro_plugin",
- "RO-SDN-arista_cloudvision": "/usr/lib/python3/dist-packages/osm_rosdn_arista_cloudvision",
- "RO-SDN-dpb": "/usr/lib/python3/dist-packages/osm_rosdn_dpb",
- "RO-SDN-dynpac": "/usr/lib/python3/dist-packages/osm_rosdn_dynpac",
- "RO-SDN-floodlight_openflow": "/usr/lib/python3/dist-packages/osm_rosdn_floodlightof",
- "RO-SDN-ietfl2vpn": "/usr/lib/python3/dist-packages/osm_rosdn_ietfl2vpn",
- "RO-SDN-juniper_contrail": "/usr/lib/python3/dist-packages/osm_rosdn_juniper_contrail",
- "RO-SDN-odl_openflow": "/usr/lib/python3/dist-packages/osm_rosdn_odlof",
- "RO-SDN-onos_openflow": "/usr/lib/python3/dist-packages/osm_rosdn_onosof",
- "RO-SDN-onos_vpls": "/usr/lib/python3/dist-packages/osm_rosdn_onos_vpls",
- "RO-VIM-aws": "/usr/lib/python3/dist-packages/osm_rovim_aws",
- "RO-VIM-azure": "/usr/lib/python3/dist-packages/osm_rovim_azure",
- "RO-VIM-gcp": "/usr/lib/python3/dist-packages/osm_rovim_gcp",
- "RO-VIM-openstack": "/usr/lib/python3/dist-packages/osm_rovim_openstack",
- "RO-VIM-openvim": "/usr/lib/python3/dist-packages/osm_rovim_openvim",
- "RO-VIM-vmware": "/usr/lib/python3/dist-packages/osm_rovim_vmware",
-}
-HOSTPATHS = [
- HostPath(
- config="ro-hostpath",
- container_path="/usr/lib/python3/dist-packages/",
- submodules=ro_host_paths,
- ),
- HostPath(
- config="common-hostpath",
- container_path="/usr/lib/python3/dist-packages/osm_common",
- ),
-]
-SERVICE_PORT = 9090
-USER = GROUP = "appuser"
-
-logger = logging.getLogger(__name__)
-
-
-def decode(content: str):
- """Base64 decoding of a string."""
- return base64.b64decode(content.encode("utf-8")).decode("utf-8")
-
-
-class OsmRoCharm(CharmBase):
- """OSM RO Kubernetes sidecar charm."""
-
- on = KafkaEvents()
- service_name = "ro"
- _stored = StoredState()
-
- def __init__(self, *args):
- super().__init__(*args)
- self._stored.set_default(certificates=set())
- self.kafka = KafkaRequires(self)
- self.mongodb_client = DatabaseRequires(self, "mongodb", database_name="osm")
- self._observe_charm_events()
- self._patch_k8s_service()
- self.ro = RoProvides(self)
- self.container: Container = self.unit.get_container("ro")
- self.debug_mode = DebugMode(self, self._stored, self.container, HOSTPATHS)
-
- # ---------------------------------------------------------------------------
- # Handlers for Charm Events
- # ---------------------------------------------------------------------------
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- try:
- self._validate_config()
- self._check_relations()
- # Check if the container is ready.
- # Eventually it will become ready after the first pebble-ready event.
- check_container_ready(self.container)
-
- self._configure_certificates()
- if not self.debug_mode.started:
- self._configure_service()
- self._update_ro_relation()
-
- # Update charm status
- self._on_update_status()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _on_update_status(self, _=None) -> None:
- """Handler for the update-status event."""
- try:
- self._validate_config()
- self._check_relations()
- check_container_ready(self.container)
- if self.debug_mode.started:
- return
- check_service_active(self.container, self.service_name)
- self.unit.status = ActiveStatus()
- except CharmError as e:
- logger.debug(e.message)
- self.unit.status = e.status
-
- def _on_required_relation_broken(self, _) -> None:
-        """Handler for broken events of required relations (kafka, mongodb)."""
- try:
- check_container_ready(self.container)
- check_service_active(self.container, "ro")
- self.container.stop("ro")
- except CharmError:
- pass
-
- self._on_update_status()
-
- def _update_ro_relation(self, event: RelationJoinedEvent = None) -> None:
- """Handler for the ro-relation-joined event."""
- try:
- if self.unit.is_leader():
- check_container_ready(self.container)
- check_service_active(self.container, "ro")
- self.ro.set_host_info(
- self.app.name, SERVICE_PORT, event.relation if event else None
- )
- except CharmError as e:
- self.unit.status = e.status
-
- def _on_get_debug_mode_information_action(self, event: ActionEvent) -> None:
- """Handler for the get-debug-mode-information action event."""
- if not self.debug_mode.started:
- event.fail(
- f"debug-mode has not started. Hint: juju config {self.app.name} debug-mode=true"
- )
- return
-
- debug_info = {"command": self.debug_mode.command, "password": self.debug_mode.password}
- event.set_results(debug_info)
-
- # ---------------------------------------------------------------------------
- # Validation and configuration and more
- # ---------------------------------------------------------------------------
-
- def _patch_k8s_service(self) -> None:
- port = ServicePort(SERVICE_PORT, name=f"{self.app.name}")
- self.service_patcher = KubernetesServicePatch(self, [port])
-
- def _observe_charm_events(self) -> None:
- event_handler_mapping = {
- # Core lifecycle events
- self.on.ro_pebble_ready: self._on_config_changed,
- self.on.config_changed: self._on_config_changed,
- self.on.update_status: self._on_update_status,
- # Relation events
- self.on.kafka_available: self._on_config_changed,
- self.on["kafka"].relation_broken: self._on_required_relation_broken,
- self.mongodb_client.on.database_created: self._on_config_changed,
- self.on["mongodb"].relation_broken: self._on_required_relation_broken,
- self.on.ro_relation_joined: self._update_ro_relation,
- # Action events
- self.on.get_debug_mode_information_action: self._on_get_debug_mode_information_action,
- }
-
- for event, handler in event_handler_mapping.items():
- self.framework.observe(event, handler)
-
- def _is_database_available(self) -> bool:
- try:
- return self.mongodb_client.is_resource_created()
- except KeyError:
- return False
-
- def _validate_config(self) -> None:
- """Validate charm configuration.
-
- Raises:
- CharmError: if charm configuration is invalid.
- """
- logger.debug("validating charm config")
- if self.config["log-level"].upper() not in [
- "TRACE",
- "DEBUG",
- "INFO",
- "WARN",
- "ERROR",
- "FATAL",
- ]:
- raise CharmError("invalid value for log-level option")
-
- refresh_period = self.config.get("period_refresh_active")
- if refresh_period and refresh_period < 60 and refresh_period != -1:
-            raise CharmError(
-                "period_refresh_active must be >= 60 seconds, or -1 to disable it"
- )
-
- def _check_relations(self) -> None:
- """Validate charm relations.
-
- Raises:
- CharmError: if charm configuration is invalid.
- """
- logger.debug("check for missing relations")
- missing_relations = []
-
- if not self.kafka.host or not self.kafka.port:
- missing_relations.append("kafka")
- if not self._is_database_available():
- missing_relations.append("mongodb")
-
- if missing_relations:
- relations_str = ", ".join(missing_relations)
- one_relation_missing = len(missing_relations) == 1
- error_msg = f'need {relations_str} relation{"" if one_relation_missing else "s"}'
- logger.warning(error_msg)
- raise CharmError(error_msg)
-
- def _configure_certificates(self) -> None:
- """Push certificates to the RO container."""
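-        # The "certificates" config option is expected to be a comma-separated list of
-        # "<name>:<base64 content>" pairs, e.g. "ca.crt:SGVsbG8=" (illustrative value).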
- if not (certificate_config := self.config.get("certificates")):
- return
-
- certificates_list = certificate_config.split(",")
- updated_certificates = set()
-
- for certificate in certificates_list:
- if ":" not in certificate:
- continue
- name, content = certificate.split(":")
- content = decode(content)
- self.container.push(
- f"/certs/{name}",
- content,
- permissions=0o400,
- make_dirs=True,
- user=USER,
- group=GROUP,
- )
- updated_certificates.add(name)
- self._stored.certificates.add(name)
- logger.info(f"certificate {name} pushed successfully")
-
- stored_certificates = {c for c in self._stored.certificates}
- for certificate_to_remove in stored_certificates.difference(updated_certificates):
- self.container.remove_path(f"/certs/{certificate_to_remove}")
- self._stored.certificates.remove(certificate_to_remove)
- logger.info(f"certificate {certificate_to_remove} removed successfully")
-
- def _configure_service(self) -> None:
- """Add Pebble layer with the ro service."""
- logger.debug(f"configuring {self.app.name} service")
- self.container.add_layer("ro", self._get_layer(), combine=True)
- self.container.replan()
-
- def _get_layer(self) -> Dict[str, Any]:
- """Get layer for Pebble."""
- return {
- "summary": "ro layer",
- "description": "pebble config layer for ro",
- "services": {
- "ro": {
- "override": "replace",
- "summary": "ro service",
-                    "command": "/bin/sh -c 'cd /app/osm_ro && python3 -u -m osm_ng_ro.ro_main'",  # cd /app/osm_ro is needed until we upgrade Juju to 3.x.
- "startup": "enabled",
- "user": USER,
- "group": GROUP,
- "working-dir": "/app/osm_ro", # This parameter has no effect in Juju 2.9.x.
- "environment": {
- # General configuration
- "OSMRO_LOG_LEVEL": self.config["log-level"].upper(),
- # Kafka configuration
- "OSMRO_MESSAGE_HOST": self.kafka.host,
- "OSMRO_MESSAGE_PORT": self.kafka.port,
- "OSMRO_MESSAGE_DRIVER": "kafka",
- # Database configuration
- "OSMRO_DATABASE_DRIVER": "mongo",
- "OSMRO_DATABASE_URI": self._get_mongodb_uri(),
- "OSMRO_DATABASE_COMMONKEY": self.config["database-commonkey"],
- # Storage configuration
- "OSMRO_STORAGE_DRIVER": "mongo",
- "OSMRO_STORAGE_PATH": "/app/storage",
- "OSMRO_STORAGE_COLLECTION": "files",
- "OSMRO_STORAGE_URI": self._get_mongodb_uri(),
- "OSMRO_PERIOD_REFRESH_ACTIVE": self.config.get("period_refresh_active")
- or 60,
- },
- }
- },
- }
-
- def _get_mongodb_uri(self):
- return list(self.mongodb_client.fetch_relation_data().values())[0]["uris"]
-
-
-if __name__ == "__main__": # pragma: no cover
- main(OsmRoCharm)
diff --git a/installers/charm/osm-ro/src/legacy_interfaces.py b/installers/charm/osm-ro/src/legacy_interfaces.py
deleted file mode 100644
index da9483e5..00000000
--- a/installers/charm/osm-ro/src/legacy_interfaces.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# flake8: noqa
-
-import ops
-
-
-class BaseRelationClient(ops.framework.Object):
-    """Base class for the requires side of a relation endpoint."""
-
- def __init__(
- self,
- charm: ops.charm.CharmBase,
- relation_name: str,
- mandatory_fields: list = [],
- ):
- super().__init__(charm, relation_name)
- self.relation_name = relation_name
- self.mandatory_fields = mandatory_fields
- self._update_relation()
-
- def get_data_from_unit(self, key: str):
- if not self.relation:
-            # Refreshing the relation here should not be necessary, since the constructor
-            # runs on every hook. It is kept because in unit tests the relation data can
-            # appear empty after update_relation_data unless the relation is re-fetched.
- self._update_relation()
- if self.relation:
- for unit in self.relation.units:
- data = self.relation.data[unit].get(key)
- if data:
- return data
-
- def get_data_from_app(self, key: str):
- if not self.relation or self.relation.app not in self.relation.data:
-            # Same as in get_data_from_unit: refresh the relation so that unit tests
-            # see the relation data.
- self._update_relation()
- if self.relation and self.relation.app in self.relation.data:
- data = self.relation.data[self.relation.app].get(key)
- if data:
- return data
-
- def is_missing_data_in_unit(self):
- return not all([self.get_data_from_unit(field) for field in self.mandatory_fields])
-
- def is_missing_data_in_app(self):
- return not all([self.get_data_from_app(field) for field in self.mandatory_fields])
-
- def _update_relation(self):
- self.relation = self.framework.model.get_relation(self.relation_name)
-
-
-class MongoClient(BaseRelationClient):
- """Requires side of a Mongo Endpoint"""
-
- mandatory_fields_mapping = {
- "reactive": ["connection_string"],
- "ops": ["replica_set_uri", "replica_set_name"],
- }
-
- def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
- super().__init__(charm, relation_name, mandatory_fields=[])
-
- @property
- def connection_string(self):
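-        # Returns e.g. "mongodb://mongodb-0:27017/?replicaSet=rs0" when the ops-style
-        # fields are present (illustrative values); otherwise the reactive charm's raw
-        # "connection_string" field is returned.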
- if self.is_opts():
- replica_set_uri = self.get_data_from_unit("replica_set_uri")
- replica_set_name = self.get_data_from_unit("replica_set_name")
- return f"{replica_set_uri}?replicaSet={replica_set_name}"
- else:
- return self.get_data_from_unit("connection_string")
-
- def is_opts(self):
- return not self.is_missing_data_in_unit_ops()
-
- def is_missing_data_in_unit(self):
- return self.is_missing_data_in_unit_ops() and self.is_missing_data_in_unit_reactive()
-
- def is_missing_data_in_unit_ops(self):
- return not all(
- [self.get_data_from_unit(field) for field in self.mandatory_fields_mapping["ops"]]
- )
-
- def is_missing_data_in_unit_reactive(self):
- return not all(
- [self.get_data_from_unit(field) for field in self.mandatory_fields_mapping["reactive"]]
- )
diff --git a/installers/charm/osm-ro/tests/integration/test_charm.py b/installers/charm/osm-ro/tests/integration/test_charm.py
deleted file mode 100644
index 38e9ad9a..00000000
--- a/installers/charm/osm-ro/tests/integration/test_charm.py
+++ /dev/null
@@ -1,100 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# Learn more about testing at: https://juju.is/docs/sdk/testing
-
-import asyncio
-import logging
-from pathlib import Path
-
-import pytest
-import yaml
-from pytest_operator.plugin import OpsTest
-
-logger = logging.getLogger(__name__)
-
-METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
-RO_APP = METADATA["name"]
-KAFKA_CHARM = "kafka-k8s"
-KAFKA_APP = "kafka"
-MONGO_DB_CHARM = "mongodb-k8s"
-MONGO_DB_APP = "mongodb"
-ZOOKEEPER_CHARM = "zookeeper-k8s"
-ZOOKEEPER_APP = "zookeeper"
-APPS = [KAFKA_APP, MONGO_DB_APP, ZOOKEEPER_APP, RO_APP]
-
-
-@pytest.mark.abort_on_fail
-async def test_ro_is_deployed(ops_test: OpsTest):
- charm = await ops_test.build_charm(".")
- resources = {"ro-image": METADATA["resources"]["ro-image"]["upstream-source"]}
-
- await asyncio.gather(
- ops_test.model.deploy(charm, resources=resources, application_name=RO_APP, series="jammy"),
- ops_test.model.deploy(ZOOKEEPER_CHARM, application_name=ZOOKEEPER_APP, channel="stable"),
- ops_test.model.deploy(KAFKA_CHARM, application_name=KAFKA_APP, channel="stable"),
- ops_test.model.deploy(MONGO_DB_CHARM, application_name=MONGO_DB_APP, channel="5/edge"),
- )
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- timeout=300,
- )
- assert ops_test.model.applications[RO_APP].status == "blocked"
- unit = ops_test.model.applications[RO_APP].units[0]
- assert unit.workload_status_message == "need kafka, mongodb relations"
-
- logger.info("Adding relations")
- await ops_test.model.add_relation(KAFKA_APP, ZOOKEEPER_APP)
- await ops_test.model.add_relation(
- "{}:mongodb".format(RO_APP), "{}:database".format(MONGO_DB_APP)
- )
- await ops_test.model.add_relation(RO_APP, KAFKA_APP)
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=APPS,
- status="active",
- timeout=300,
- )
-
-
-@pytest.mark.abort_on_fail
-async def test_ro_scales(ops_test: OpsTest):
- logger.info("Scaling osm-ro")
- expected_units = 3
- assert len(ops_test.model.applications[RO_APP].units) == 1
- await ops_test.model.applications[RO_APP].scale(expected_units)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=[RO_APP], status="active", timeout=1000, wait_for_exact_units=expected_units
- )
-
-
-@pytest.mark.abort_on_fail
-async def test_ro_blocks_without_kafka(ops_test: OpsTest):
- await asyncio.gather(ops_test.model.applications[KAFKA_APP].remove())
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[RO_APP])
- assert ops_test.model.applications[RO_APP].status == "blocked"
- for unit in ops_test.model.applications[RO_APP].units:
- assert unit.workload_status_message == "need kafka relation"
diff --git a/installers/charm/osm-ro/tests/unit/test_charm.py b/installers/charm/osm-ro/tests/unit/test_charm.py
deleted file mode 100644
index d0353abc..00000000
--- a/installers/charm/osm-ro/tests/unit/test_charm.py
+++ /dev/null
@@ -1,102 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-#
-# Learn more about testing at: https://juju.is/docs/sdk/testing
-
-import pytest
-from ops.model import ActiveStatus, BlockedStatus
-from ops.testing import Harness
-from pytest_mock import MockerFixture
-
-from charm import CharmError, OsmRoCharm, check_service_active
-
-container_name = "ro"
-service_name = "ro"
-
-
-@pytest.fixture
-def harness(mocker: MockerFixture):
- mocker.patch("charm.KubernetesServicePatch", lambda x, y: None)
- harness = Harness(OsmRoCharm)
- harness.begin()
- harness.container_pebble_ready(container_name)
- yield harness
- harness.cleanup()
-
-
-def test_missing_relations(harness: Harness):
- harness.charm.on.config_changed.emit()
- assert type(harness.charm.unit.status) == BlockedStatus
- assert all(relation in harness.charm.unit.status.message for relation in ["mongodb", "kafka"])
-
-
-def test_ready(harness: Harness):
- _add_relations(harness)
- assert harness.charm.unit.status == ActiveStatus()
-
-
-def test_container_stops_after_relation_broken(harness: Harness):
- harness.charm.on[container_name].pebble_ready.emit(container_name)
- container = harness.charm.unit.get_container(container_name)
- relation_ids = _add_relations(harness)
- check_service_active(container, service_name)
- harness.remove_relation(relation_ids[0])
- with pytest.raises(CharmError):
- check_service_active(container, service_name)
-
-
-def test_ro_relation_joined(harness: Harness):
- harness.set_leader(True)
- _add_relations(harness)
- relation_id = harness.add_relation("ro", "lcm")
- harness.add_relation_unit(relation_id, "lcm/0")
- relation_data = harness.get_relation_data(relation_id, harness.charm.app.name)
- assert harness.charm.unit.status == ActiveStatus()
- assert relation_data == {"host": harness.charm.app.name, "port": "9090"}
-
-
-def test_certificates(harness: Harness):
- # aGVsbG8K: "hello\n"
- # aGVsbG8gYWdhaW4K: "hello again\n"
- _add_relations(harness)
- harness.update_config({"certificates": "cert1:aGVsbG8K,cert2:aGVsbG8gYWdhaW4K"})
- for cert_name, content in {"cert1": "hello\n", "cert2": "hello again\n"}.items():
- assert harness.charm.container.exists(f"/certs/{cert_name}")
- assert harness.charm.container.pull(f"/certs/{cert_name}").read() == content
-
-
-def _add_relations(harness: Harness):
- relation_ids = []
- # Add mongo relation
- relation_id = harness.add_relation("mongodb", "mongodb")
- harness.add_relation_unit(relation_id, "mongodb/0")
- harness.update_relation_data(
- relation_id,
- "mongodb",
- {"uris": "mongodb://:1234", "username": "user", "password": "password"},
- )
- relation_ids.append(relation_id)
- # Add kafka relation
- relation_id = harness.add_relation("kafka", "kafka")
- harness.add_relation_unit(relation_id, "kafka/0")
- harness.update_relation_data(relation_id, "kafka", {"host": "kafka", "port": "9092"})
- relation_ids.append(relation_id)
- return relation_ids
diff --git a/installers/charm/osm-ro/tox.ini b/installers/charm/osm-ro/tox.ini
deleted file mode 100644
index c6cc629a..00000000
--- a/installers/charm/osm-ro/tox.ini
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-
-[tox]
-skipsdist=True
-skip_missing_interpreters = True
-envlist = lint, unit, integration
-
-[vars]
-src_path = {toxinidir}/src/
-tst_path = {toxinidir}/tests/
-lib_path = {toxinidir}/lib/charms/osm_ro
-all_path = {[vars]src_path} {[vars]tst_path}
-
-[testenv]
-basepython = python3.8
-setenv =
- PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path}
- PYTHONBREAKPOINT=ipdb.set_trace
- PY_COLORS=1
-passenv =
- PYTHONPATH
- CHARM_BUILD_DIR
- MODEL_SETTINGS
-
-[testenv:fmt]
-description = Apply coding style standards to code
-deps =
- black
- isort
-commands =
- isort {[vars]all_path}
- black {[vars]all_path}
-
-[testenv:lint]
-description = Check code against coding style standards
-deps =
- black
- flake8==4.0.1
- flake8-docstrings
- flake8-builtins
- pyproject-flake8
- pep8-naming
- isort
- codespell
-commands =
- # uncomment the following line if this charm owns a lib
- codespell {[vars]lib_path} --ignore-words-list=Ro,RO,ro
- codespell {toxinidir} --skip {toxinidir}/.git --skip {toxinidir}/.tox \
- --skip {toxinidir}/build --skip {toxinidir}/lib --skip {toxinidir}/venv \
- --skip {toxinidir}/.mypy_cache --skip {toxinidir}/icon.svg --ignore-words-list=Ro,RO,ro
- # pflake8 wrapper supports config from pyproject.toml
- pflake8 {[vars]all_path}
- isort --check-only --diff {[vars]all_path}
- black --check --diff {[vars]all_path}
-
-[testenv:unit]
-description = Run unit tests
-deps =
- pytest
- pytest-mock
- coverage[toml]
- -r{toxinidir}/requirements.txt
-commands =
- coverage run --source={[vars]src_path},{[vars]lib_path} \
- -m pytest --ignore={[vars]tst_path}integration -v --tb native -s {posargs}
- coverage report
- coverage xml
-
-[testenv:integration]
-description = Run integration tests
-deps =
- pytest
- juju<3
- pytest-operator
- -r{toxinidir}/requirements.txt
-commands =
- pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} --cloud microk8s
\ No newline at end of file
diff --git a/installers/charm/osm-update-db-operator/.gitignore b/installers/charm/osm-update-db-operator/.gitignore
deleted file mode 100644
index c2501574..00000000
--- a/installers/charm/osm-update-db-operator/.gitignore
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-venv/
-build/
-*.charm
-.coverage
-coverage.xml
-__pycache__/
-*.py[cod]
-.vscode
-.tox
diff --git a/installers/charm/osm-update-db-operator/.jujuignore b/installers/charm/osm-update-db-operator/.jujuignore
deleted file mode 100644
index ddb544e6..00000000
--- a/installers/charm/osm-update-db-operator/.jujuignore
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-/venv
-*.py[cod]
-*.charm
diff --git a/installers/charm/osm-update-db-operator/CONTRIBUTING.md b/installers/charm/osm-update-db-operator/CONTRIBUTING.md
deleted file mode 100644
index 4d706713..00000000
--- a/installers/charm/osm-update-db-operator/CONTRIBUTING.md
+++ /dev/null
@@ -1,74 +0,0 @@
-
-# Contributing
-
-## Overview
-
-This document explains the processes and practices recommended for contributing enhancements to
-the Update DB charm.
-
-- Generally, before developing enhancements to this charm, you should consider [opening an issue
- ](https://github.com/gcalvinos/update-db-operator/issues) explaining your use case.
-- If you would like to chat with us about your use-cases or proposed implementation, you can reach
- us at [Canonical Mattermost public channel](https://chat.charmhub.io/charmhub/channels/charm-dev)
- or [Discourse](https://discourse.charmhub.io/). The primary author of this charm is available on
- the Mattermost channel as `@davigar15`.
-- Familiarising yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) library
- will help you a lot when working on new features or bug fixes.
-- All enhancements require review before being merged. Code review typically examines
- - code quality
- - test coverage
- - user experience for Juju administrators of this charm.
-- Please help us out in ensuring easy-to-review branches by rebasing your pull request branch onto
- the `main` branch. This also avoids merge commits and creates a linear Git commit history.
-
-## Developing
-
-You can use the environments created by `tox` for development:
-
-```shell
-tox --notest -e unit
-source .tox/unit/bin/activate
-```
-
-### Testing
-
-```shell
-tox -e fmt # update your code according to linting rules
-tox -e lint # code style
-tox -e unit # unit tests
-# tox -e integration # integration tests
-tox # runs 'lint' and 'unit' environments
-```
-
-## Build charm
-
-Build the charm in this git repository using:
-
-```shell
-charmcraft pack
-```
-
-### Deploy
-
-```bash
-# Create a model
-juju add-model test-update-db
-# Enable DEBUG logging
-juju model-config logging-config="=INFO;unit=DEBUG"
-# Deploy the charm
-juju deploy ./update-db_ubuntu-20.04-amd64.charm \
- --resource update-db-image=ubuntu:latest
-```
diff --git a/installers/charm/osm-update-db-operator/LICENSE b/installers/charm/osm-update-db-operator/LICENSE
deleted file mode 100644
index d6456956..00000000
--- a/installers/charm/osm-update-db-operator/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/installers/charm/osm-update-db-operator/README.md b/installers/charm/osm-update-db-operator/README.md
deleted file mode 100644
index 2ee8f6e4..00000000
--- a/installers/charm/osm-update-db-operator/README.md
+++ /dev/null
@@ -1,80 +0,0 @@
-
-
-# OSM Update DB Operator
-
-[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black/tree/main)
-
-## Description
-
-Charm used to update the OSM databases during an OSM upgrade process. To use it, you should have a running OSM deployment that you want to upgrade.
-
-## Usage
-
-### Deploy the charm (locally)
-
-```shell
-juju add-model update-db
-juju deploy osm-update-db-operator --series focal
-```
-
-Set MongoDB and MySQL URIs:
-
-```shell
-juju config osm-update-db-operator mysql-uri=<mysql-uri>
-juju config osm-update-db-operator mongodb-uri=<mongodb-uri>
-```
-
-### Updating the databases
-
-To update both databases, run the following command:
-
-```shell
-juju run-action osm-update-db-operator/0 update-db current-version=<current-version> target-version=<target-version>
-# Example:
-juju run-action osm-update-db-operator/0 update-db current-version=9 target-version=10
-```
-
-If you only want to update MongoDB, use the flag 'mongodb-only=True':
-
-```shell
-juju run-action osm-update-db-operator/0 update-db current-version=9 target-version=10 mongodb-only=True
-```
-
-If you only want to update the MySQL database, use the flag 'mysql-only=True':
-
-```shell
-juju run-action osm-update-db-operator/0 update-db current-version=9 target-version=10 mysql-only=True
-```
-
-You can check whether the database update completed successfully by inspecting the result of the action:
-
-```shell
-juju show-action-output <action-id>
-```
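-
-Only the upgrade paths registered in `src/db_upgrade.py` are supported: for MongoDB these are currently 9 to 10, 10 to 12 and 12 to 14, while the MySQL registry is still empty. The sketch below only illustrates the shape of that dispatch; the function names and bodies are placeholders, not the real migrations:
-
-```python
-# Illustration of the registry behind the update-db action
-# (mirrors MONGODB_UPGRADE_FUNCTIONS in src/db_upgrade.py; placeholder callables).
-UPGRADE_FUNCTIONS = {
-    "9": {"10": [lambda uri: print(f"9 -> 10 on {uri}")]},
-    "10": {"12": [lambda uri: print(f"10 -> 12 on {uri}")]},
-    "12": {"14": [lambda uri: print(f"12 -> 14 on {uri}")]},
-}
-
-
-def upgrade(mongo_uri: str, current: str, target: str) -> None:
-    """Validate the requested path and run each registered migration in order."""
-    if current not in UPGRADE_FUNCTIONS or target not in UPGRADE_FUNCTIONS[current]:
-        raise Exception(f"cannot upgrade from version {current} to {target}.")
-    for migrate in UPGRADE_FUNCTIONS[current][target]:
-        migrate(mongo_uri)
-```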
-
-### Fixes for bugs
-
-Updates the database to apply the changes needed to fix a bug. You need to specify the bug number. Example:
-
-```shell
-juju run-action osm-update-db-operator/0 apply-patch bug-number=1837
-```
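-
-Internally, `apply-patch` looks the bug number up in the `BUG_FIXES` registry defined in `src/db_upgrade.py` (currently only bug 1837 has a patch) and fails for unknown bug numbers. A minimal sketch of that dispatch, with a placeholder patch body:
-
-```python
-# Illustration only: the real registry maps 1837 to MongoPatch1837.patch.
-BUG_FIXES = {1837: lambda mongo_uri: print(f"patching bug 1837 via {mongo_uri}")}
-
-
-def apply_patch(mongo_uri: str, bug_number: int) -> None:
-    """Run the patch function registered for the given bug number."""
-    if bug_number not in BUG_FIXES:
-        raise Exception(f"There is no patch for bug {bug_number}")
-    BUG_FIXES[bug_number](mongo_uri)
-```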
-
-## Contributing
-
-Please see the [Juju SDK docs](https://juju.is/docs/sdk) for guidelines
-on enhancements to this charm following best practices, and
-`CONTRIBUTING.md` for developer guidance.
diff --git a/installers/charm/osm-update-db-operator/actions.yaml b/installers/charm/osm-update-db-operator/actions.yaml
deleted file mode 100644
index aba1ee32..00000000
--- a/installers/charm/osm-update-db-operator/actions.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-update-db:
- description: |
- Updates MongoDB and MySQL with the new data needed for the target OSM version
- params:
- current-version:
- type: integer
- description: "Current version of Charmed OSM - Example: 9"
- target-version:
- type: integer
- description: "Final version of OSM after the update - Example: 10"
- mysql-only:
- type: boolean
- description: "if True the update is only applied for mysql database"
- mongodb-only:
- type: boolean
- description: "if True the update is only applied for mongo database"
- required:
- - current-version
- - target-version
-apply-patch:
- description: |
- Updates the database to apply the changes needed to fix a bug
- params:
- bug-number:
- type: integer
- description: "The number of the bug that needs to be fixed"
- required:
- - bug-number
diff --git a/installers/charm/osm-update-db-operator/charmcraft.yaml b/installers/charm/osm-update-db-operator/charmcraft.yaml
deleted file mode 100644
index 31c233b5..00000000
--- a/installers/charm/osm-update-db-operator/charmcraft.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-type: "charm"
-bases:
- - build-on:
- - name: "ubuntu"
- channel: "20.04"
- run-on:
- - name: "ubuntu"
- channel: "20.04"
-parts:
- charm:
- build-packages:
- - git
diff --git a/installers/charm/osm-update-db-operator/config.yaml b/installers/charm/osm-update-db-operator/config.yaml
deleted file mode 100644
index 3b7190b5..00000000
--- a/installers/charm/osm-update-db-operator/config.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-options:
- log-level:
- description: "Log Level"
- type: string
- default: "INFO"
- mongodb-uri:
- type: string
- description: |
- MongoDB URI (external database)
- mongodb://<host>:<port>/<database>
- mysql-uri:
- type: string
- description: |
- Mysql URI with the following format:
- mysql://<user>:<password>@<host>:<port>/<database>
diff --git a/installers/charm/osm-update-db-operator/metadata.yaml b/installers/charm/osm-update-db-operator/metadata.yaml
deleted file mode 100644
index b058591f..00000000
--- a/installers/charm/osm-update-db-operator/metadata.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-name: osm-update-db-operator
-description: |
- Charm to update the OSM databases
-summary: |
- Charm to update the OSM databases
diff --git a/installers/charm/osm-update-db-operator/pyproject.toml b/installers/charm/osm-update-db-operator/pyproject.toml
deleted file mode 100644
index 3fae1741..00000000
--- a/installers/charm/osm-update-db-operator/pyproject.toml
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# Testing tools configuration
-[tool.coverage.run]
-branch = true
-
-[tool.coverage.report]
-show_missing = true
-
-[tool.pytest.ini_options]
-minversion = "6.0"
-log_cli_level = "INFO"
-
-# Formatting tools configuration
-[tool.black]
-line-length = 99
-target-version = ["py38"]
-
-[tool.isort]
-profile = "black"
-
-# Linting tools configuration
-[tool.flake8]
-max-line-length = 99
-max-doc-length = 99
-max-complexity = 10
-exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
-select = ["E", "W", "F", "C", "N", "R", "D", "H"]
-# Ignore W503, E501 because using black creates errors with this
-# Ignore D107 Missing docstring in __init__
-ignore = ["W503", "E501", "D107"]
-# D100, D101, D102, D103: Ignore missing docstrings in tests
-per-file-ignores = ["tests/*:D100,D101,D102,D103,D104"]
-docstring-convention = "google"
-# Check for properly formatted copyright header in each file
-copyright-check = "True"
-copyright-author = "Canonical Ltd."
-copyright-regexp = "Copyright\\s\\d{4}([-,]\\d{4})*\\s+%(author)s"
-
-[tool.bandit]
-tests = ["B201", "B301"]
diff --git a/installers/charm/osm-update-db-operator/requirements.txt b/installers/charm/osm-update-db-operator/requirements.txt
deleted file mode 100644
index b488dba4..00000000
--- a/installers/charm/osm-update-db-operator/requirements.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-ops < 2.2
-pymongo == 3.12.3
diff --git a/installers/charm/osm-update-db-operator/src/charm.py b/installers/charm/osm-update-db-operator/src/charm.py
deleted file mode 100755
index 32db2f76..00000000
--- a/installers/charm/osm-update-db-operator/src/charm.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Update DB charm module."""
-
-import logging
-
-from ops.charm import CharmBase
-from ops.framework import StoredState
-from ops.main import main
-from ops.model import ActiveStatus, BlockedStatus
-
-from db_upgrade import MongoUpgrade, MysqlUpgrade
-
-logger = logging.getLogger(__name__)
-
-
-class UpgradeDBCharm(CharmBase):
- """Upgrade DB Charm operator."""
-
- _stored = StoredState()
-
- def __init__(self, *args):
- super().__init__(*args)
-
- # Observe events
- event_observe_mapping = {
- self.on.update_db_action: self._on_update_db_action,
- self.on.apply_patch_action: self._on_apply_patch_action,
- self.on.config_changed: self._on_config_changed,
- }
- for event, observer in event_observe_mapping.items():
- self.framework.observe(event, observer)
-
- @property
- def mongo(self):
- """Create MongoUpgrade object if the configuration has been set."""
- mongo_uri = self.config.get("mongodb-uri")
- return MongoUpgrade(mongo_uri) if mongo_uri else None
-
- @property
- def mysql(self):
- """Create MysqlUpgrade object if the configuration has been set."""
- mysql_uri = self.config.get("mysql-uri")
- return MysqlUpgrade(mysql_uri) if mysql_uri else None
-
- def _on_config_changed(self, _):
- mongo_uri = self.config.get("mongodb-uri")
- mysql_uri = self.config.get("mysql-uri")
- if not mongo_uri and not mysql_uri:
- self.unit.status = BlockedStatus("mongodb-uri and/or mysql-uri must be set")
- return
- self.unit.status = ActiveStatus()
-
- def _on_update_db_action(self, event):
- """Handle the update-db action."""
- current_version = str(event.params["current-version"])
- target_version = str(event.params["target-version"])
- mysql_only = event.params.get("mysql-only")
- mongodb_only = event.params.get("mongodb-only")
- try:
- results = {}
- if mysql_only and mongodb_only:
- raise Exception("cannot set both mysql-only and mongodb-only options to True")
- if mysql_only:
- self._upgrade_mysql(current_version, target_version)
- results["mysql"] = "Upgraded successfully"
- elif mongodb_only:
- self._upgrade_mongodb(current_version, target_version)
- results["mongodb"] = "Upgraded successfully"
- else:
- self._upgrade_mysql(current_version, target_version)
- results["mysql"] = "Upgraded successfully"
- self._upgrade_mongodb(current_version, target_version)
- results["mongodb"] = "Upgraded successfully"
- event.set_results(results)
- except Exception as e:
- event.fail(f"Failed DB Upgrade: {e}")
-
- def _upgrade_mysql(self, current_version, target_version):
- logger.debug("Upgrading mysql")
- if self.mysql:
- self.mysql.upgrade(current_version, target_version)
- else:
- raise Exception("mysql-uri not set")
-
- def _upgrade_mongodb(self, current_version, target_version):
- logger.debug("Upgrading mongodb")
- if self.mongo:
- self.mongo.upgrade(current_version, target_version)
- else:
- raise Exception("mongo-uri not set")
-
- def _on_apply_patch_action(self, event):
- bug_number = event.params["bug-number"]
- logger.debug("Patching bug number {}".format(str(bug_number)))
- try:
- if self.mongo:
- self.mongo.apply_patch(bug_number)
- else:
- raise Exception("mongo-uri not set")
- except Exception as e:
- event.fail(f"Failed Patch Application: {e}")
-
-
-if __name__ == "__main__": # pragma: no cover
- main(UpgradeDBCharm, use_juju_for_storage=True)
diff --git a/installers/charm/osm-update-db-operator/src/db_upgrade.py b/installers/charm/osm-update-db-operator/src/db_upgrade.py
deleted file mode 100644
index 295ce875..00000000
--- a/installers/charm/osm-update-db-operator/src/db_upgrade.py
+++ /dev/null
@@ -1,542 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Upgrade DB charm module."""
-
-import json
-import logging
-
-from pymongo import MongoClient
-from uuid import uuid4
-
-logger = logging.getLogger(__name__)
-
-
-class MongoUpgrade1214:
- """Upgrade MongoDB Database from OSM v12 to v14."""
-
- @staticmethod
- def gather_vnfr_healing_alerts(vnfr, vnfd):
- alerts = []
- nsr_id = vnfr["nsr-id-ref"]
- df = vnfd.get("df", [{}])[0]
- # Checking for auto-healing configuration
- if "healing-aspect" in df:
- healing_aspects = df["healing-aspect"]
- for healing in healing_aspects:
- for healing_policy in healing.get("healing-policy", ()):
- vdu_id = healing_policy["vdu-id"]
- vdur = next(
- (
- vdur
- for vdur in vnfr["vdur"]
- if vdu_id == vdur["vdu-id-ref"]
- ),
- {},
- )
- if not vdur:
- continue
- metric_name = "vm_status"
- vdu_name = vdur.get("name")
- vnf_member_index = vnfr["member-vnf-index-ref"]
- uuid = str(uuid4())
- name = f"healing_{uuid}"
- action = healing_policy
- # action_on_recovery = healing.get("action-on-recovery")
- # cooldown_time = healing.get("cooldown-time")
- # day1 = healing.get("day1")
- alert = {
- "uuid": uuid,
- "name": name,
- "metric": metric_name,
- "tags": {
- "ns_id": nsr_id,
- "vnf_member_index": vnf_member_index,
- "vdu_name": vdu_name,
- },
- "alarm_status": "ok",
- "action_type": "healing",
- "action": action,
- }
- alerts.append(alert)
- return alerts
-
- @staticmethod
- def gather_vnfr_scaling_alerts(vnfr, vnfd):
- alerts = []
- nsr_id = vnfr["nsr-id-ref"]
- df = vnfd.get("df", [{}])[0]
- # Checking for auto-scaling configuration
- if "scaling-aspect" in df:
- rel_operation_types = {
- "GE": ">=",
- "LE": "<=",
- "GT": ">",
- "LT": "<",
- "EQ": "==",
- "NE": "!=",
- }
- scaling_aspects = df["scaling-aspect"]
- all_vnfd_monitoring_params = {}
- for ivld in vnfd.get("int-virtual-link-desc", ()):
- for mp in ivld.get("monitoring-parameters", ()):
- all_vnfd_monitoring_params[mp.get("id")] = mp
- for vdu in vnfd.get("vdu", ()):
- for mp in vdu.get("monitoring-parameter", ()):
- all_vnfd_monitoring_params[mp.get("id")] = mp
- for df in vnfd.get("df", ()):
- for mp in df.get("monitoring-parameter", ()):
- all_vnfd_monitoring_params[mp.get("id")] = mp
- for scaling_aspect in scaling_aspects:
- scaling_group_name = scaling_aspect.get("name", "")
- # Get monitored VDUs
- all_monitored_vdus = set()
- for delta in scaling_aspect.get(
- "aspect-delta-details", {}
- ).get("deltas", ()):
- for vdu_delta in delta.get("vdu-delta", ()):
- all_monitored_vdus.add(vdu_delta.get("id"))
- monitored_vdurs = list(
- filter(
- lambda vdur: vdur["vdu-id-ref"]
- in all_monitored_vdus,
- vnfr["vdur"],
- )
- )
- if not monitored_vdurs:
- logger.error("Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric")
- continue
- for scaling_policy in scaling_aspect.get(
- "scaling-policy", ()
- ):
- if scaling_policy["scaling-type"] != "automatic":
- continue
- threshold_time = scaling_policy.get(
- "threshold-time", "1"
- )
- cooldown_time = scaling_policy.get("cooldown-time", "0")
- for scaling_criteria in scaling_policy["scaling-criteria"]:
- monitoring_param_ref = scaling_criteria.get(
- "vnf-monitoring-param-ref"
- )
- vnf_monitoring_param = all_vnfd_monitoring_params[
- monitoring_param_ref
- ]
- for vdur in monitored_vdurs:
- vdu_id = vdur["vdu-id-ref"]
- metric_name = vnf_monitoring_param.get("performance-metric")
- metric_name = f"osm_{metric_name}"
- vdu_name = vdur["name"]
- vnf_member_index = vnfr["member-vnf-index-ref"]
- scalein_threshold = scaling_criteria.get("scale-in-threshold")
- # Looking for min/max-number-of-instances
- instances_min_number = 1
- instances_max_number = 1
- vdu_profile = df["vdu-profile"]
- if vdu_profile:
- profile = next(
- item
- for item in vdu_profile
- if item["id"] == vdu_id
- )
- instances_min_number = profile.get("min-number-of-instances", 1)
- instances_max_number = profile.get("max-number-of-instances", 1)
-
- if scalein_threshold:
- uuid = str(uuid4())
- name = f"scalein_{uuid}"
- operation = scaling_criteria["scale-in-relational-operation"]
- rel_operator = rel_operation_types.get(operation, "<=")
- metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
- expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
- labels = {
- "ns_id": nsr_id,
- "vnf_member_index": vnf_member_index,
- "vdu_id": vdu_id,
- }
- prom_cfg = {
- "alert": name,
- "expr": expression,
- "for": str(threshold_time) + "m",
- "labels": labels,
- }
- action = scaling_policy
- action = {
- "scaling-group": scaling_group_name,
- "cooldown-time": cooldown_time,
- }
- alert = {
- "uuid": uuid,
- "name": name,
- "metric": metric_name,
- "tags": {
- "ns_id": nsr_id,
- "vnf_member_index": vnf_member_index,
- "vdu_id": vdu_id,
- },
- "alarm_status": "ok",
- "action_type": "scale_in",
- "action": action,
- "prometheus_config": prom_cfg,
- }
- alerts.append(alert)
-
- scaleout_threshold = scaling_criteria.get("scale-out-threshold")
- if scaleout_threshold:
- uuid = str(uuid4())
- name = f"scaleout_{uuid}"
- operation = scaling_criteria["scale-out-relational-operation"]
- rel_operator = rel_operation_types.get(operation, "<=")
- metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
- expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
- labels = {
- "ns_id": nsr_id,
- "vnf_member_index": vnf_member_index,
- "vdu_id": vdu_id,
- }
- prom_cfg = {
- "alert": name,
- "expr": expression,
- "for": str(threshold_time) + "m",
- "labels": labels,
- }
- action = scaling_policy
- action = {
- "scaling-group": scaling_group_name,
- "cooldown-time": cooldown_time,
- }
- alert = {
- "uuid": uuid,
- "name": name,
- "metric": metric_name,
- "tags": {
- "ns_id": nsr_id,
- "vnf_member_index": vnf_member_index,
- "vdu_id": vdu_id,
- },
- "alarm_status": "ok",
- "action_type": "scale_out",
- "action": action,
- "prometheus_config": prom_cfg,
- }
- alerts.append(alert)
- return alerts
-
- @staticmethod
- def _migrate_alerts(osm_db):
- """Create new alerts collection.
- """
- if "alerts" in osm_db.list_collection_names():
- return
- logger.info("Entering in MongoUpgrade1214._migrate_alerts function")
-
- # Get vnfds from MongoDB
- logger.info("Reading VNF descriptors:")
- vnfds = osm_db["vnfds"]
- db_vnfds = []
- for vnfd in vnfds.find():
- logger.info(f' {vnfd["_id"]}: {vnfd["description"]}')
- db_vnfds.append(vnfd)
-
- # Get vnfrs from MongoDB
- logger.info("Reading VNFRs")
- vnfrs = osm_db["vnfrs"]
-
- # Gather healing and scaling alerts for each vnfr
- healing_alerts = []
- scaling_alerts = []
- for vnfr in vnfrs.find():
- logger.info(f' vnfr {vnfr["_id"]}')
- vnfd = next((sub for sub in db_vnfds if sub["_id"] == vnfr["vnfd-id"]), None)
- healing_alerts.extend(MongoUpgrade1214.gather_vnfr_healing_alerts(vnfr, vnfd))
- scaling_alerts.extend(MongoUpgrade1214.gather_vnfr_scaling_alerts(vnfr, vnfd))
-
- # Add new alerts in MongoDB
- alerts = osm_db["alerts"]
- for alert in healing_alerts:
- logger.info(f"Storing healing alert in MongoDB: {alert}")
- alerts.insert_one(alert)
- for alert in scaling_alerts:
- logger.info(f"Storing scaling alert in MongoDB: {alert}")
- alerts.insert_one(alert)
-
- # Delete old alarms collections
- logger.info("Deleting alarms and alarms_action collections")
- alarms = osm_db["alarms"]
- alarms.drop()
- alarms_action = osm_db["alarms_action"]
- alarms_action.drop()
-
-
- @staticmethod
- def upgrade(mongo_uri):
- """Upgrade alerts in MongoDB."""
- logger.info("Entering in MongoUpgrade1214.upgrade function")
- myclient = MongoClient(mongo_uri)
- osm_db = myclient["osm"]
- MongoUpgrade1214._migrate_alerts(osm_db)
-
-
-class MongoUpgrade1012:
- """Upgrade MongoDB Database from OSM v10 to v12."""
-
- @staticmethod
- def _remove_namespace_from_k8s(nsrs, nsr):
- namespace = "kube-system:"
- if nsr["_admin"].get("deployed"):
- k8s_list = []
- for k8s in nsr["_admin"]["deployed"].get("K8s"):
- if k8s.get("k8scluster-uuid"):
- k8s["k8scluster-uuid"] = k8s["k8scluster-uuid"].replace(namespace, "", 1)
- k8s_list.append(k8s)
- myquery = {"_id": nsr["_id"]}
- nsrs.update_one(myquery, {"$set": {"_admin.deployed.K8s": k8s_list}})
-
- @staticmethod
- def _update_nsr(osm_db):
- """Update nsr.
-
- Add vim_message = None if it does not exist.
- Remove "namespace:" from k8scluster-uuid.
- """
- if "nsrs" not in osm_db.list_collection_names():
- return
- logger.info("Entering in MongoUpgrade1012._update_nsr function")
-
- nsrs = osm_db["nsrs"]
- for nsr in nsrs.find():
- logger.debug(f"Updating {nsr['_id']} nsr")
- for key, values in nsr.items():
- if isinstance(values, list):
- item_list = []
- for value in values:
- if isinstance(value, dict) and value.get("vim_info"):
- index = list(value["vim_info"].keys())[0]
- if not value["vim_info"][index].get("vim_message"):
- value["vim_info"][index]["vim_message"] = None
- item_list.append(value)
- myquery = {"_id": nsr["_id"]}
- nsrs.update_one(myquery, {"$set": {key: item_list}})
- MongoUpgrade1012._remove_namespace_from_k8s(nsrs, nsr)
-
- @staticmethod
- def _update_vnfr(osm_db):
- """Update vnfr.
-
- Add vim_message to vdur if it does not exist.
- Copy content of interfaces into interfaces_backup.
- """
- if "vnfrs" not in osm_db.list_collection_names():
- return
- logger.info("Entering in MongoUpgrade1012._update_vnfr function")
- mycol = osm_db["vnfrs"]
- for vnfr in mycol.find():
- logger.debug(f"Updating {vnfr['_id']} vnfr")
- vdur_list = []
- for vdur in vnfr["vdur"]:
- if vdur.get("vim_info"):
- index = list(vdur["vim_info"].keys())[0]
- if not vdur["vim_info"][index].get("vim_message"):
- vdur["vim_info"][index]["vim_message"] = None
- if vdur["vim_info"][index].get(
- "interfaces", "Not found"
- ) != "Not found" and not vdur["vim_info"][index].get("interfaces_backup"):
- vdur["vim_info"][index]["interfaces_backup"] = vdur["vim_info"][index][
- "interfaces"
- ]
- vdur_list.append(vdur)
- myquery = {"_id": vnfr["_id"]}
- mycol.update_one(myquery, {"$set": {"vdur": vdur_list}})
-
- @staticmethod
- def _update_k8scluster(osm_db):
- """Remove namespace from helm-chart and helm-chart-v3 id."""
- if "k8sclusters" not in osm_db.list_collection_names():
- return
- logger.info("Entering in MongoUpgrade1012._update_k8scluster function")
- namespace = "kube-system:"
- k8sclusters = osm_db["k8sclusters"]
- for k8scluster in k8sclusters.find():
- if k8scluster["_admin"].get("helm-chart") and k8scluster["_admin"]["helm-chart"].get(
- "id"
- ):
- if k8scluster["_admin"]["helm-chart"]["id"].startswith(namespace):
- k8scluster["_admin"]["helm-chart"]["id"] = k8scluster["_admin"]["helm-chart"][
- "id"
- ].replace(namespace, "", 1)
- if k8scluster["_admin"].get("helm-chart-v3") and k8scluster["_admin"][
- "helm-chart-v3"
- ].get("id"):
- if k8scluster["_admin"]["helm-chart-v3"]["id"].startswith(namespace):
- k8scluster["_admin"]["helm-chart-v3"]["id"] = k8scluster["_admin"][
- "helm-chart-v3"
- ]["id"].replace(namespace, "", 1)
- myquery = {"_id": k8scluster["_id"]}
- k8sclusters.update_one(myquery, {"$set": k8scluster})
-
- @staticmethod
- def upgrade(mongo_uri):
- """Upgrade nsr, vnfr and k8scluster in DB."""
- logger.info("Entering in MongoUpgrade1012.upgrade function")
- myclient = MongoClient(mongo_uri)
- osm_db = myclient["osm"]
- MongoUpgrade1012._update_nsr(osm_db)
- MongoUpgrade1012._update_vnfr(osm_db)
- MongoUpgrade1012._update_k8scluster(osm_db)
-
-
-class MongoUpgrade910:
- """Upgrade MongoDB Database from OSM v9 to v10."""
-
- @staticmethod
- def upgrade(mongo_uri):
- """Add parameter alarm status = OK if not found in alarms collection."""
- myclient = MongoClient(mongo_uri)
- osm_db = myclient["osm"]
- collist = osm_db.list_collection_names()
-
- if "alarms" in collist:
- mycol = osm_db["alarms"]
- for x in mycol.find():
- if not x.get("alarm_status"):
- myquery = {"_id": x["_id"]}
- mycol.update_one(myquery, {"$set": {"alarm_status": "ok"}})
-
-
-class MongoPatch1837:
- """Patch Bug 1837 on MongoDB."""
-
- @staticmethod
- def _update_nslcmops_params(osm_db):
- """Updates the nslcmops collection to change the additional params to a string."""
- logger.info("Entering in MongoPatch1837._update_nslcmops_params function")
- if "nslcmops" in osm_db.list_collection_names():
- nslcmops = osm_db["nslcmops"]
- for nslcmop in nslcmops.find():
- if nslcmop.get("operationParams"):
- if nslcmop["operationParams"].get("additionalParamsForVnf") and isinstance(
- nslcmop["operationParams"].get("additionalParamsForVnf"), list
- ):
- string_param = json.dumps(
- nslcmop["operationParams"]["additionalParamsForVnf"]
- )
- myquery = {"_id": nslcmop["_id"]}
- nslcmops.update_one(
- myquery,
- {
- "$set": {
- "operationParams": {"additionalParamsForVnf": string_param}
- }
- },
- )
- elif nslcmop["operationParams"].get("primitive_params") and isinstance(
- nslcmop["operationParams"].get("primitive_params"), dict
- ):
- string_param = json.dumps(nslcmop["operationParams"]["primitive_params"])
- myquery = {"_id": nslcmop["_id"]}
- nslcmops.update_one(
- myquery,
- {"$set": {"operationParams": {"primitive_params": string_param}}},
- )
-
- @staticmethod
- def _update_vnfrs_params(osm_db):
- """Updates the vnfrs collection to change the additional params to a string."""
- logger.info("Entering in MongoPatch1837._update_vnfrs_params function")
- if "vnfrs" in osm_db.list_collection_names():
- mycol = osm_db["vnfrs"]
- for vnfr in mycol.find():
- if vnfr.get("kdur"):
- kdur_list = []
- for kdur in vnfr["kdur"]:
- if kdur.get("additionalParams") and not isinstance(
- kdur["additionalParams"], str
- ):
- kdur["additionalParams"] = json.dumps(kdur["additionalParams"])
- kdur_list.append(kdur)
- myquery = {"_id": vnfr["_id"]}
- mycol.update_one(
- myquery,
- {"$set": {"kdur": kdur_list}},
- )
- vnfr["kdur"] = kdur_list
-
- @staticmethod
- def patch(mongo_uri):
- """Updates the database to change the additional params from dict to a string."""
- logger.info("Entering in MongoPatch1837.patch function")
- myclient = MongoClient(mongo_uri)
- osm_db = myclient["osm"]
- MongoPatch1837._update_nslcmops_params(osm_db)
- MongoPatch1837._update_vnfrs_params(osm_db)
-
-
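-# Registries used by the classes below:
-#   MONGODB_UPGRADE_FUNCTIONS[current][target] -> ordered list of migration callables.
-#   MYSQL_UPGRADE_FUNCTIONS is currently empty: no MySQL migrations are registered yet.
-#   BUG_FIXES[bug_number] -> patch callable run by MongoUpgrade.apply_patch.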
-MONGODB_UPGRADE_FUNCTIONS = {
- "9": {"10": [MongoUpgrade910.upgrade]},
- "10": {"12": [MongoUpgrade1012.upgrade]},
- "12": {"14": [MongoUpgrade1214.upgrade]},
-}
-MYSQL_UPGRADE_FUNCTIONS = {}
-BUG_FIXES = {
- 1837: MongoPatch1837.patch,
-}
-
-
-class MongoUpgrade:
- """Upgrade MongoDB Database."""
-
- def __init__(self, mongo_uri):
- self.mongo_uri = mongo_uri
-
- def upgrade(self, current, target):
- """Validates the upgrading path and upgrades the DB."""
- self._validate_upgrade(current, target)
- for function in MONGODB_UPGRADE_FUNCTIONS.get(current)[target]:
- function(self.mongo_uri)
-
- def _validate_upgrade(self, current, target):
- """Check if the upgrade path chosen is possible."""
- logger.info("Validating the upgrade path")
- if current not in MONGODB_UPGRADE_FUNCTIONS:
- raise Exception(f"cannot upgrade from {current} version.")
- if target not in MONGODB_UPGRADE_FUNCTIONS[current]:
- raise Exception(f"cannot upgrade from version {current} to {target}.")
-
- def apply_patch(self, bug_number: int) -> None:
- """Checks the bug-number and applies the fix in the database."""
- if bug_number not in BUG_FIXES:
- raise Exception(f"There is no patch for bug {bug_number}")
- patch_function = BUG_FIXES[bug_number]
- patch_function(self.mongo_uri)
-
-
-class MysqlUpgrade:
- """Upgrade Mysql Database."""
-
- def __init__(self, mysql_uri):
- self.mysql_uri = mysql_uri
-
- def upgrade(self, current, target):
- """Validates the upgrading path and upgrades the DB."""
- self._validate_upgrade(current, target)
- for function in MYSQL_UPGRADE_FUNCTIONS[current][target]:
- function(self.mysql_uri)
-
- def _validate_upgrade(self, current, target):
- """Check if the upgrade path chosen is possible."""
- logger.info("Validating the upgrade path")
- if current not in MYSQL_UPGRADE_FUNCTIONS:
- raise Exception(f"cannot upgrade from {current} version.")
- if target not in MYSQL_UPGRADE_FUNCTIONS[current]:
- raise Exception(f"cannot upgrade from version {current} to {target}.")
diff --git a/installers/charm/osm-update-db-operator/tests/integration/test_charm.py b/installers/charm/osm-update-db-operator/tests/integration/test_charm.py
deleted file mode 100644
index cc9e0be2..00000000
--- a/installers/charm/osm-update-db-operator/tests/integration/test_charm.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import base64
-import logging
-from pathlib import Path
-
-import pytest
-import yaml
-from pytest_operator.plugin import OpsTest
-
-logger = logging.getLogger(__name__)
-
-METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
-
-
-@pytest.mark.abort_on_fail
-async def test_build_and_deploy(ops_test: OpsTest):
- """Build the charm-under-test and deploy it together with related charms.
-
- Assert on the unit status before any relations/configurations take place.
- """
- await ops_test.model.set_config({"update-status-hook-interval": "10s"})
- # build and deploy charm from local source folder
- charm = await ops_test.build_charm(".")
- resources = {
- "update-db-image": METADATA["resources"]["update-db-image"]["upstream-source"],
- }
- await ops_test.model.deploy(charm, resources=resources, application_name="update-db")
- await ops_test.model.wait_for_idle(apps=["update-db"], status="active", timeout=1000)
- assert ops_test.model.applications["update-db"].units[0].workload_status == "active"
-
- await ops_test.model.set_config({"update-status-hook-interval": "60m"})
-
-
-def base64_encode(phrase: str) -> str:
- return base64.b64encode(phrase.encode("utf-8")).decode("utf-8")
diff --git a/installers/charm/osm-update-db-operator/tests/unit/test_charm.py b/installers/charm/osm-update-db-operator/tests/unit/test_charm.py
deleted file mode 100644
index a0f625db..00000000
--- a/installers/charm/osm-update-db-operator/tests/unit/test_charm.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import unittest
-from unittest.mock import Mock, patch
-
-from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus
-from ops.testing import Harness
-
-from charm import UpgradeDBCharm
-
-
-class TestCharm(unittest.TestCase):
- def setUp(self):
- self.harness = Harness(UpgradeDBCharm)
- self.addCleanup(self.harness.cleanup)
- self.harness.begin()
-
- def test_initial_config(self):
- self.assertEqual(self.harness.model.unit.status, MaintenanceStatus(""))
-
- def test_config_changed(self):
- self.harness.update_config({"mongodb-uri": "foo"})
- self.assertEqual(self.harness.model.unit.status, ActiveStatus())
-
- def test_config_changed_blocked(self):
- self.harness.update_config({"log-level": "DEBUG"})
- self.assertEqual(
- self.harness.model.unit.status,
- BlockedStatus("mongodb-uri and/or mysql-uri must be set"),
- )
-
- def test_update_db_fail_only_params(self):
- action_event = Mock(
- params={
- "current-version": 9,
- "target-version": 10,
- "mysql-only": True,
- "mongodb-only": True,
- }
- )
- self.harness.charm._on_update_db_action(action_event)
- self.assertEqual(
- action_event.fail.call_args,
- [("Failed DB Upgrade: cannot set both mysql-only and mongodb-only options to True",)],
- )
-
- @patch("charm.MongoUpgrade")
- @patch("charm.MysqlUpgrade")
- def test_update_db_mysql(self, mock_mysql_upgrade, mock_mongo_upgrade):
- self.harness.update_config({"mysql-uri": "foo"})
- action_event = Mock(
- params={
- "current-version": 9,
- "target-version": 10,
- "mysql-only": True,
- "mongodb-only": False,
- }
- )
- self.harness.charm._on_update_db_action(action_event)
- mock_mysql_upgrade().upgrade.assert_called_once()
- mock_mongo_upgrade.assert_not_called()
-
- @patch("charm.MongoUpgrade")
- @patch("charm.MysqlUpgrade")
- def test_update_db_mongo(self, mock_mysql_upgrade, mock_mongo_upgrade):
- self.harness.update_config({"mongodb-uri": "foo"})
- action_event = Mock(
- params={
- "current-version": 7,
- "target-version": 10,
- "mysql-only": False,
- "mongodb-only": True,
- }
- )
- self.harness.charm._on_update_db_action(action_event)
- mock_mongo_upgrade().upgrade.assert_called_once()
- mock_mysql_upgrade.assert_not_called()
-
- @patch("charm.MongoUpgrade")
- def test_update_db_not_configured_mongo_fail(self, mock_mongo_upgrade):
- action_event = Mock(
- params={
- "current-version": 7,
- "target-version": 10,
- "mysql-only": False,
- "mongodb-only": True,
- }
- )
- self.harness.charm._on_update_db_action(action_event)
- mock_mongo_upgrade.assert_not_called()
- self.assertEqual(
- action_event.fail.call_args,
- [("Failed DB Upgrade: mongo-uri not set",)],
- )
-
- @patch("charm.MysqlUpgrade")
- def test_update_db_not_configured_mysql_fail(self, mock_mysql_upgrade):
- action_event = Mock(
- params={
- "current-version": 7,
- "target-version": 10,
- "mysql-only": True,
- "mongodb-only": False,
- }
- )
- self.harness.charm._on_update_db_action(action_event)
- mock_mysql_upgrade.assert_not_called()
- self.assertEqual(
- action_event.fail.call_args,
- [("Failed DB Upgrade: mysql-uri not set",)],
- )
-
- @patch("charm.MongoUpgrade")
- @patch("charm.MysqlUpgrade")
- def test_update_db_mongodb_and_mysql(self, mock_mysql_upgrade, mock_mongo_upgrade):
- self.harness.update_config({"mongodb-uri": "foo"})
- self.harness.update_config({"mysql-uri": "foo"})
- action_event = Mock(
- params={
- "current-version": 7,
- "target-version": 10,
- "mysql-only": False,
- "mongodb-only": False,
- }
- )
- self.harness.charm._on_update_db_action(action_event)
- mock_mysql_upgrade().upgrade.assert_called_once()
- mock_mongo_upgrade().upgrade.assert_called_once()
-
- @patch("charm.MongoUpgrade")
- def test_apply_patch(self, mock_mongo_upgrade):
- self.harness.update_config({"mongodb-uri": "foo"})
- action_event = Mock(
- params={
- "bug-number": 57,
- }
- )
- self.harness.charm._on_apply_patch_action(action_event)
- mock_mongo_upgrade().apply_patch.assert_called_once()
-
- @patch("charm.MongoUpgrade")
- def test_apply_patch_fail(self, mock_mongo_upgrade):
- action_event = Mock(
- params={
- "bug-number": 57,
- }
- )
- self.harness.charm._on_apply_patch_action(action_event)
- mock_mongo_upgrade.assert_not_called()
- self.assertEqual(
- action_event.fail.call_args,
- [("Failed Patch Application: mongo-uri not set",)],
- )
diff --git a/installers/charm/osm-update-db-operator/tests/unit/test_db_upgrade.py b/installers/charm/osm-update-db-operator/tests/unit/test_db_upgrade.py
deleted file mode 100644
index 50affdd2..00000000
--- a/installers/charm/osm-update-db-operator/tests/unit/test_db_upgrade.py
+++ /dev/null
@@ -1,413 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-import unittest
-from unittest.mock import MagicMock, Mock, call, patch
-
-import db_upgrade
-from db_upgrade import (
- MongoPatch1837,
- MongoUpgrade,
- MongoUpgrade910,
- MongoUpgrade1012,
- MysqlUpgrade,
-)
-
-logger = logging.getLogger(__name__)
-
-
-class TestUpgradeMongo910(unittest.TestCase):
- @patch("db_upgrade.MongoClient")
- def test_upgrade_mongo_9_10(self, mock_mongo_client):
- mock_db = MagicMock()
- alarms = Mock()
- alarms.find.return_value = [{"_id": "1", "alarm_status": "1"}]
- collection_dict = {"alarms": alarms, "other": {}}
- mock_db.list_collection_names.return_value = collection_dict
- mock_db.__getitem__.side_effect = collection_dict.__getitem__
- mock_mongo_client.return_value = {"osm": mock_db}
- MongoUpgrade910.upgrade("mongo_uri")
- alarms.update_one.assert_not_called()
-
- @patch("db_upgrade.MongoClient")
- def test_upgrade_mongo_9_10_no_alarms(self, mock_mongo_client):
- mock_db = Mock()
- mock_db.__getitem__ = Mock()
-
- mock_db.list_collection_names.return_value = {"other": {}}
- mock_db.alarms.return_value = None
- mock_mongo_client.return_value = {"osm": mock_db}
- self.assertIsNone(MongoUpgrade910.upgrade("mongo_uri"))
-
- @patch("db_upgrade.MongoClient")
- def test_upgrade_mongo_9_10_no_alarm_status(self, mock_mongo_client):
- mock_db = MagicMock()
- alarms = Mock()
- alarms.find.return_value = [{"_id": "1"}]
- collection_dict = {"alarms": alarms, "other": {}}
- mock_db.list_collection_names.return_value = collection_dict
- mock_db.__getitem__.side_effect = collection_dict.__getitem__
- mock_db.alarms.return_value = alarms
- mock_mongo_client.return_value = {"osm": mock_db}
- MongoUpgrade910.upgrade("mongo_uri")
- alarms.update_one.assert_called_once_with({"_id": "1"}, {"$set": {"alarm_status": "ok"}})
-
-
-class TestUpgradeMongo1012(unittest.TestCase):
- def setUp(self):
- self.mock_db = MagicMock()
- self.nsrs = Mock()
- self.vnfrs = Mock()
- self.k8s_clusters = Mock()
-
- @patch("db_upgrade.MongoClient")
- def test_update_nsr_empty_nsrs(self, mock_mongo_client):
- self.nsrs.find.return_value = []
- collection_list = {"nsrs": self.nsrs}
- self.mock_db.__getitem__.side_effect = collection_list.__getitem__
- self.mock_db.list_collection_names.return_value = collection_list
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoUpgrade1012.upgrade("mongo_uri")
-
- @patch("db_upgrade.MongoClient")
- def test_update_nsr_empty_nsr(self, mock_mongo_client):
- nsr = MagicMock()
- nsr_values = {"_id": "2", "_admin": {}}
- nsr.__getitem__.side_effect = nsr_values.__getitem__
- nsr.items.return_value = []
- self.nsrs.find.return_value = [nsr]
- collection_list = {"nsrs": self.nsrs}
- self.mock_db.__getitem__.side_effect = collection_list.__getitem__
- self.mock_db.list_collection_names.return_value = collection_list
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoUpgrade1012.upgrade("mongo_uri")
-
- @patch("db_upgrade.MongoClient")
- def test_update_nsr_add_vim_message(self, mock_mongo_client):
- nsr = MagicMock()
- vim_info1 = {"vim_info_key1": {}}
- vim_info2 = {"vim_info_key2": {"vim_message": "Hello"}}
- nsr_items = {"nsr_item_key": [{"vim_info": vim_info1}, {"vim_info": vim_info2}]}
- nsr_values = {"_id": "2", "_admin": {}}
- nsr.__getitem__.side_effect = nsr_values.__getitem__
- nsr.items.return_value = nsr_items.items()
- self.nsrs.find.return_value = [nsr]
- collection_list = {"nsrs": self.nsrs}
- self.mock_db.__getitem__.side_effect = collection_list.__getitem__
- self.mock_db.list_collection_names.return_value = collection_list
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoUpgrade1012.upgrade("mongo_uri")
- expected_vim_info = {"vim_info_key1": {"vim_message": None}}
- expected_vim_info2 = {"vim_info_key2": {"vim_message": "Hello"}}
- self.assertEqual(vim_info1, expected_vim_info)
- self.assertEqual(vim_info2, expected_vim_info2)
- self.nsrs.update_one.assert_called_once_with({"_id": "2"}, {"$set": nsr_items})
-
- @patch("db_upgrade.MongoClient")
- def test_update_nsr_admin(self, mock_mongo_client):
- nsr = MagicMock()
- k8s = [{"k8scluster-uuid": "namespace"}, {"k8scluster-uuid": "kube-system:k8s"}]
- admin = {"deployed": {"K8s": k8s}}
- nsr_values = {"_id": "2", "_admin": admin}
- nsr.__getitem__.side_effect = nsr_values.__getitem__
- nsr_items = {}
- nsr.items.return_value = nsr_items.items()
- self.nsrs.find.return_value = [nsr]
- collection_list = {"nsrs": self.nsrs}
- self.mock_db.__getitem__.side_effect = collection_list.__getitem__
- self.mock_db.list_collection_names.return_value = collection_list
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoUpgrade1012.upgrade("mongo_uri")
- expected_k8s = [{"k8scluster-uuid": "namespace"}, {"k8scluster-uuid": "k8s"}]
- self.nsrs.update_one.assert_called_once_with(
- {"_id": "2"}, {"$set": {"_admin.deployed.K8s": expected_k8s}}
- )
-
- @patch("db_upgrade.MongoClient")
- def test_update_vnfr_empty_vnfrs(self, mock_mongo_client):
- self.vnfrs.find.return_value = [{"_id": "10", "vdur": []}]
- collection_list = {"vnfrs": self.vnfrs}
- self.mock_db.__getitem__.side_effect = collection_list.__getitem__
- self.mock_db.list_collection_names.return_value = collection_list
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoUpgrade1012.upgrade("mongo_uri")
- self.vnfrs.update_one.assert_called_once_with({"_id": "10"}, {"$set": {"vdur": []}})
-
- @patch("db_upgrade.MongoClient")
- def test_update_vnfr_no_vim_info(self, mock_mongo_client):
- vdur = {"other": {}}
- vnfr = {"_id": "10", "vdur": [vdur]}
- self.vnfrs.find.return_value = [vnfr]
- collection_list = {"vnfrs": self.vnfrs}
- self.mock_db.__getitem__.side_effect = collection_list.__getitem__
- self.mock_db.list_collection_names.return_value = collection_list
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoUpgrade1012.upgrade("mongo_uri")
- self.assertEqual(vdur, {"other": {}})
- self.vnfrs.update_one.assert_called_once_with({"_id": "10"}, {"$set": {"vdur": [vdur]}})
-
- @patch("db_upgrade.MongoClient")
- def test_update_vnfr_vim_message_not_conditions_matched(self, mock_mongo_client):
- vim_info = {"vim_message": "HelloWorld"}
- vim_infos = {"key1": vim_info, "key2": "value2"}
- vdur = {"vim_info": vim_infos, "other": {}}
- vnfr = {"_id": "10", "vdur": [vdur]}
- self.vnfrs.find.return_value = [vnfr]
- collection_list = {"vnfrs": self.vnfrs}
- self.mock_db.__getitem__.side_effect = collection_list.__getitem__
- self.mock_db.list_collection_names.return_value = collection_list
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoUpgrade1012.upgrade("mongo_uri")
- expected_vim_info = {"vim_message": "HelloWorld"}
- self.assertEqual(vim_info, expected_vim_info)
- self.vnfrs.update_one.assert_called_once_with({"_id": "10"}, {"$set": {"vdur": [vdur]}})
-
- @patch("db_upgrade.MongoClient")
- def test_update_vnfr_vim_message_is_missing(self, mock_mongo_client):
- vim_info = {"interfaces_backup": "HelloWorld"}
- vim_infos = {"key1": vim_info, "key2": "value2"}
- vdur = {"vim_info": vim_infos, "other": {}}
- vnfr = {"_id": "10", "vdur": [vdur]}
- self.vnfrs.find.return_value = [vnfr]
- collection_list = {"vnfrs": self.vnfrs}
- self.mock_db.__getitem__.side_effect = collection_list.__getitem__
- self.mock_db.list_collection_names.return_value = collection_list
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoUpgrade1012.upgrade("mongo_uri")
- expected_vim_info = {"vim_message": None, "interfaces_backup": "HelloWorld"}
- self.assertEqual(vim_info, expected_vim_info)
- self.vnfrs.update_one.assert_called_once_with({"_id": "10"}, {"$set": {"vdur": [vdur]}})
-
- @patch("db_upgrade.MongoClient")
- def test_update_vnfr_interfaces_backup_is_updated(self, mock_mongo_client):
- vim_info = {"interfaces": "HelloWorld", "vim_message": "ByeWorld"}
- vim_infos = {"key1": vim_info, "key2": "value2"}
- vdur = {"vim_info": vim_infos, "other": {}}
- vnfr = {"_id": "10", "vdur": [vdur]}
- self.vnfrs.find.return_value = [vnfr]
- collection_list = {"vnfrs": self.vnfrs}
- self.mock_db.__getitem__.side_effect = collection_list.__getitem__
- self.mock_db.list_collection_names.return_value = collection_list
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoUpgrade1012.upgrade("mongo_uri")
- expected_vim_info = {
- "interfaces": "HelloWorld",
- "vim_message": "ByeWorld",
- "interfaces_backup": "HelloWorld",
- }
- self.assertEqual(vim_info, expected_vim_info)
- self.vnfrs.update_one.assert_called_once_with({"_id": "10"}, {"$set": {"vdur": [vdur]}})
-
- @patch("db_upgrade.MongoClient")
- def test_update_k8scluster_empty_k8scluster(self, mock_mongo_client):
- self.k8s_clusters.find.return_value = []
- collection_list = {"k8sclusters": self.k8s_clusters}
- self.mock_db.__getitem__.side_effect = collection_list.__getitem__
- self.mock_db.list_collection_names.return_value = collection_list
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoUpgrade1012.upgrade("mongo_uri")
-
- @patch("db_upgrade.MongoClient")
- def test_update_k8scluster_replace_namespace_in_helm_chart(self, mock_mongo_client):
- helm_chart = {"id": "kube-system:Hello", "other": {}}
- k8s_cluster = {"_id": "8", "_admin": {"helm-chart": helm_chart}}
- self.k8s_clusters.find.return_value = [k8s_cluster]
- collection_list = {"k8sclusters": self.k8s_clusters}
- self.mock_db.__getitem__.side_effect = collection_list.__getitem__
- self.mock_db.list_collection_names.return_value = collection_list
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoUpgrade1012.upgrade("mongo_uri")
- expected_helm_chart = {"id": "Hello", "other": {}}
- expected_k8s_cluster = {"_id": "8", "_admin": {"helm-chart": expected_helm_chart}}
- self.k8s_clusters.update_one.assert_called_once_with(
- {"_id": "8"}, {"$set": expected_k8s_cluster}
- )
-
- @patch("db_upgrade.MongoClient")
- def test_update_k8scluster_replace_namespace_in_helm_chart_v3(self, mock_mongo_client):
- helm_chart_v3 = {"id": "kube-system:Hello", "other": {}}
- k8s_cluster = {"_id": "8", "_admin": {"helm-chart-v3": helm_chart_v3}}
- self.k8s_clusters.find.return_value = [k8s_cluster]
- collection_list = {"k8sclusters": self.k8s_clusters}
- self.mock_db.__getitem__.side_effect = collection_list.__getitem__
- self.mock_db.list_collection_names.return_value = collection_list
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoUpgrade1012.upgrade("mongo_uri")
- expected_helm_chart_v3 = {"id": "Hello", "other": {}}
- expected_k8s_cluster = {"_id": "8", "_admin": {"helm-chart-v3": expected_helm_chart_v3}}
- self.k8s_clusters.update_one.assert_called_once_with(
- {"_id": "8"}, {"$set": expected_k8s_cluster}
- )
-
-
-class TestPatch1837(unittest.TestCase):
- def setUp(self):
- self.mock_db = MagicMock()
- self.vnfrs = Mock()
- self.nslcmops = Mock()
-
- @patch("db_upgrade.MongoClient")
- def test_update_vnfrs_params_no_vnfrs_or_nslcmops(self, mock_mongo_client):
- collection_dict = {"other": {}}
- self.mock_db.list_collection_names.return_value = collection_dict
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoPatch1837.patch("mongo_uri")
-
- @patch("db_upgrade.MongoClient")
- def test_update_vnfrs_params_no_kdur(self, mock_mongo_client):
- self.vnfrs.find.return_value = {"_id": "1"}
- collection_dict = {"vnfrs": self.vnfrs, "other": {}}
- self.mock_db.list_collection_names.return_value = collection_dict
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoPatch1837.patch("mongo_uri")
-
- @patch("db_upgrade.MongoClient")
- def test_update_vnfrs_params_kdur_without_additional_params(self, mock_mongo_client):
- kdur = [{"other": {}}]
- self.vnfrs.find.return_value = [{"_id": "1", "kdur": kdur}]
- collection_dict = {"vnfrs": self.vnfrs, "other": {}}
- self.mock_db.list_collection_names.return_value = collection_dict
- self.mock_db.__getitem__.side_effect = collection_dict.__getitem__
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoPatch1837.patch("mongo_uri")
- self.vnfrs.update_one.assert_called_once_with({"_id": "1"}, {"$set": {"kdur": kdur}})
-
- @patch("db_upgrade.MongoClient")
- def test_update_vnfrs_params_kdur_two_additional_params(self, mock_mongo_client):
- kdur1 = {"additionalParams": "additional_params", "other": {}}
- kdur2 = {"additionalParams": 4, "other": {}}
- kdur = [kdur1, kdur2]
- self.vnfrs.find.return_value = [{"_id": "1", "kdur": kdur}]
- collection_dict = {"vnfrs": self.vnfrs, "other": {}}
- self.mock_db.list_collection_names.return_value = collection_dict
- self.mock_db.__getitem__.side_effect = collection_dict.__getitem__
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoPatch1837.patch("mongo_uri")
- self.vnfrs.update_one.assert_called_once_with(
- {"_id": "1"}, {"$set": {"kdur": [kdur1, {"additionalParams": "4", "other": {}}]}}
- )
-
- @patch("db_upgrade.MongoClient")
- def test_update_nslcmops_params_no_nslcmops(self, mock_mongo_client):
- self.nslcmops.find.return_value = []
- collection_dict = {"nslcmops": self.nslcmops, "other": {}}
- self.mock_db.list_collection_names.return_value = collection_dict
- self.mock_db.__getitem__.side_effect = collection_dict.__getitem__
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoPatch1837.patch("mongo_uri")
-
- @patch("db_upgrade.MongoClient")
- def test_update_nslcmops_additional_params(self, mock_mongo_client):
- operation_params_list = {"additionalParamsForVnf": [1, 2, 3]}
- operation_params_dict = {"primitive_params": {"dict_key": 5}}
- nslcmops1 = {"_id": "1", "other": {}}
- nslcmops2 = {"_id": "2", "operationParams": operation_params_list, "other": {}}
- nslcmops3 = {"_id": "3", "operationParams": operation_params_dict, "other": {}}
- self.nslcmops.find.return_value = [nslcmops1, nslcmops2, nslcmops3]
- collection_dict = {"nslcmops": self.nslcmops, "other": {}}
- self.mock_db.list_collection_names.return_value = collection_dict
- self.mock_db.__getitem__.side_effect = collection_dict.__getitem__
- mock_mongo_client.return_value = {"osm": self.mock_db}
- MongoPatch1837.patch("mongo_uri")
- call1 = call(
- {"_id": "2"}, {"$set": {"operationParams": {"additionalParamsForVnf": "[1, 2, 3]"}}}
- )
- call2 = call(
- {"_id": "3"}, {"$set": {"operationParams": {"primitive_params": '{"dict_key": 5}'}}}
- )
- expected_calls = [call1, call2]
- self.nslcmops.update_one.assert_has_calls(expected_calls)
-
-
-class TestMongoUpgrade(unittest.TestCase):
- def setUp(self):
- self.mongo = MongoUpgrade("http://fake_mongo:27017")
- self.upgrade_function = Mock()
- self.patch_function = Mock()
- db_upgrade.MONGODB_UPGRADE_FUNCTIONS = {"9": {"10": [self.upgrade_function]}}
- db_upgrade.BUG_FIXES = {1837: self.patch_function}
-
- def test_validate_upgrade_fail_target(self):
- valid_current = "9"
- invalid_target = "7"
- with self.assertRaises(Exception) as context:
- self.mongo._validate_upgrade(valid_current, invalid_target)
- self.assertEqual("cannot upgrade from version 9 to 7.", str(context.exception))
-
- def test_validate_upgrade_fail_current(self):
- invalid_current = "7"
- invalid_target = "8"
- with self.assertRaises(Exception) as context:
- self.mongo._validate_upgrade(invalid_current, invalid_target)
- self.assertEqual("cannot upgrade from 7 version.", str(context.exception))
-
- def test_validate_upgrade_pass(self):
- valid_current = "9"
- valid_target = "10"
- self.assertIsNone(self.mongo._validate_upgrade(valid_current, valid_target))
-
- @patch("db_upgrade.MongoUpgrade._validate_upgrade")
- def test_update_mongo_success(self, mock_validate):
- valid_current = "9"
- valid_target = "10"
- mock_validate.return_value = ""
- self.mongo.upgrade(valid_current, valid_target)
- self.upgrade_function.assert_called_once()
-
- def test_validate_apply_patch(self):
- bug_number = 1837
- self.mongo.apply_patch(bug_number)
- self.patch_function.assert_called_once()
-
- def test_validate_apply_patch_invalid_bug_fail(self):
- bug_number = 2
- with self.assertRaises(Exception) as context:
- self.mongo.apply_patch(bug_number)
- self.assertEqual("There is no patch for bug 2", str(context.exception))
- self.patch_function.assert_not_called()
-
-
-class TestMysqlUpgrade(unittest.TestCase):
- def setUp(self):
- self.mysql = MysqlUpgrade("mysql://fake_mysql:23023")
- self.upgrade_function = Mock()
- db_upgrade.MYSQL_UPGRADE_FUNCTIONS = {"9": {"10": [self.upgrade_function]}}
-
- def test_validate_upgrade_mysql_fail_current(self):
- invalid_current = "7"
- invalid_target = "8"
- with self.assertRaises(Exception) as context:
- self.mysql._validate_upgrade(invalid_current, invalid_target)
- self.assertEqual("cannot upgrade from 7 version.", str(context.exception))
-
- def test_validate_upgrade_mysql_fail_target(self):
- valid_current = "9"
- invalid_target = "7"
- with self.assertRaises(Exception) as context:
- self.mysql._validate_upgrade(valid_current, invalid_target)
- self.assertEqual("cannot upgrade from version 9 to 7.", str(context.exception))
-
- def test_validate_upgrade_mysql_success(self):
- valid_current = "9"
- valid_target = "10"
- self.assertIsNone(self.mysql._validate_upgrade(valid_current, valid_target))
-
- @patch("db_upgrade.MysqlUpgrade._validate_upgrade")
- def test_upgrade_mysql_success(self, mock_validate):
- valid_current = "9"
- valid_target = "10"
- mock_validate.return_value = ""
- self.mysql.upgrade(valid_current, valid_target)
- self.upgrade_function.assert_called_once()
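
For readers reconstructing these tests: they all rely on the same mocking pattern, in which db_upgrade.MongoClient is patched so that indexing the returned client with "osm" yields a MagicMock database whose collections are plain mocks. A minimal sketch of that pattern follows, assuming db_upgrade is importable as in the deleted tox setup; the collection name and empty result set are illustrative only.

    import unittest
    from unittest.mock import MagicMock, Mock, patch


    class MongoMockPatternSketch(unittest.TestCase):
        """Illustrative sketch of the MongoClient mocking pattern used above."""

        @patch("db_upgrade.MongoClient")
        def test_pattern(self, mock_mongo_client):
            from db_upgrade import MongoUpgrade910

            mock_db = MagicMock()
            alarms = Mock()
            alarms.find.return_value = []  # nothing to migrate
            collections = {"alarms": alarms}
            mock_db.list_collection_names.return_value = collections
            mock_db.__getitem__.side_effect = collections.__getitem__
            # MongoClient(uri)["osm"] must resolve to the mocked database
            mock_mongo_client.return_value = {"osm": mock_db}

            MongoUpgrade910.upgrade("mongo_uri")
            alarms.update_one.assert_not_called()  # empty collection, no writes
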
diff --git a/installers/charm/osm-update-db-operator/tox.ini b/installers/charm/osm-update-db-operator/tox.ini
deleted file mode 100644
index bcf628a8..00000000
--- a/installers/charm/osm-update-db-operator/tox.ini
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-[tox]
-skipsdist=True
-skip_missing_interpreters = True
-envlist = lint, unit
-
-[vars]
-src_path = {toxinidir}/src/
-tst_path = {toxinidir}/tests/
-;lib_path = {toxinidir}/lib/charms/
-all_path = {[vars]src_path} {[vars]tst_path}
-
-[testenv]
-basepython = python3
-setenv =
- PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path}
- PYTHONBREAKPOINT=ipdb.set_trace
-passenv =
- PYTHONPATH
- HOME
- PATH
- CHARM_BUILD_DIR
- MODEL_SETTINGS
- HTTP_PROXY
- HTTPS_PROXY
- NO_PROXY
-
-[testenv:fmt]
-description = Apply coding style standards to code
-deps =
- black
- isort
-commands =
- isort {[vars]all_path}
- black {[vars]all_path}
-
-[testenv:lint]
-description = Check code against coding style standards
-deps =
- black
- flake8>= 4.0.0, < 5.0.0
- flake8-docstrings
- flake8-copyright
- flake8-builtins
- # prospector[with_everything]
- pylint
- pyproject-flake8
- pep8-naming
- isort
- codespell
- yamllint
- -r{toxinidir}/requirements.txt
-commands =
- codespell {toxinidir}/*.yaml {toxinidir}/*.ini {toxinidir}/*.md \
- {toxinidir}/*.toml {toxinidir}/*.txt {toxinidir}/.github
- # prospector -A -F -T
- pylint -E {[vars]src_path}
- yamllint -d '\{extends: default, ignore: "build\n.tox" \}' .
- # pflake8 wrapper supports config from pyproject.toml
- pflake8 {[vars]all_path}
- isort --check-only --diff {[vars]all_path}
- black --check --diff {[vars]all_path}
-
-[testenv:unit]
-description = Run unit tests
-deps =
- pytest
- pytest-mock
- pytest-cov
- coverage[toml]
- -r{toxinidir}/requirements.txt
-commands =
- pytest --ignore={[vars]tst_path}integration --cov={[vars]src_path} --cov-report=xml
- coverage report
-
-[testenv:security]
-description = Run security tests
-deps =
- bandit
- safety
-commands =
- bandit -r {[vars]src_path}
- - safety check
-
-[testenv:integration]
-description = Run integration tests
-deps =
- pytest
- pytest-operator
-commands =
- pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs}
diff --git a/installers/charm/prometheus/.gitignore b/installers/charm/prometheus/.gitignore
deleted file mode 100644
index 2885df27..00000000
--- a/installers/charm/prometheus/.gitignore
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-venv
-.vscode
-build
-*.charm
-.coverage
-coverage.xml
-.stestr
-cover
-release
\ No newline at end of file
diff --git a/installers/charm/prometheus/.jujuignore b/installers/charm/prometheus/.jujuignore
deleted file mode 100644
index 3ae3e7dc..00000000
--- a/installers/charm/prometheus/.jujuignore
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-venv
-.vscode
-build
-*.charm
-.coverage
-coverage.xml
-.gitignore
-.stestr
-cover
-release
-tests/
-requirements*
-tox.ini
diff --git a/installers/charm/prometheus/.yamllint.yaml b/installers/charm/prometheus/.yamllint.yaml
deleted file mode 100644
index d71fb69f..00000000
--- a/installers/charm/prometheus/.yamllint.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
----
-extends: default
-
-yaml-files:
- - "*.yaml"
- - "*.yml"
- - ".yamllint"
-ignore: |
- .tox
- cover/
- build/
- venv
- release/
diff --git a/installers/charm/prometheus/README.md b/installers/charm/prometheus/README.md
deleted file mode 100644
index 0486c0db..00000000
--- a/installers/charm/prometheus/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-# Prometheus operator Charm for Kubernetes
-
-## Requirements
diff --git a/installers/charm/prometheus/actions.yaml b/installers/charm/prometheus/actions.yaml
deleted file mode 100644
index e41f3df0..00000000
--- a/installers/charm/prometheus/actions.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-backup:
- description: "Do a mongodb backup"
diff --git a/installers/charm/prometheus/charmcraft.yaml b/installers/charm/prometheus/charmcraft.yaml
deleted file mode 100644
index 87d04635..00000000
--- a/installers/charm/prometheus/charmcraft.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-type: charm
-bases:
- - build-on:
- - name: ubuntu
- channel: "20.04"
- architectures: ["amd64"]
- run-on:
- - name: ubuntu
- channel: "20.04"
- architectures:
- - amd64
- - aarch64
- - arm64
-parts:
- charm:
- build-packages:
- - cargo
- - git
- - libffi-dev
- - rustc
diff --git a/installers/charm/prometheus/config.yaml b/installers/charm/prometheus/config.yaml
deleted file mode 100644
index b25eabae..00000000
--- a/installers/charm/prometheus/config.yaml
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-options:
- web-subpath:
- description: Subpath for accessing Prometheus
- type: string
- default: /
- default-target:
- description: Default target to be added in Prometheus
- type: string
- default: ""
- max_file_size:
- type: int
- description: |
- The maximum file size, in megabytes. If there is a reverse proxy in front
- of Prometheus, it may need to be configured to handle the requested size.
- Note: if set to 0, there is no limit.
- default: 0
- ingress_class:
- type: string
- description: |
- Ingress class name. This is useful for selecting the ingress to be used
- in case there are multiple ingresses in the underlying k8s clusters.
- ingress_whitelist_source_range:
- type: string
- description: |
- A comma-separated list of CIDRs to store in the
- ingress.kubernetes.io/whitelist-source-range annotation.
-
- This can be used to lock down access to
- Prometheus based on source IP address.
- default: ""
- tls_secret_name:
- type: string
- description: TLS Secret name
- default: ""
- site_url:
- type: string
- description: Ingress URL
- default: ""
- cluster_issuer:
- type: string
- description: Name of the cluster issuer for TLS certificates
- default: ""
- enable_web_admin_api:
- type: boolean
- description: Boolean to enable the web admin api
- default: false
- image_pull_policy:
- type: string
- description: |
- ImagePullPolicy configuration for the pod.
- Possible values: always, ifnotpresent, never
- default: always
- security_context:
- description: Enables the security context of the pods
- type: boolean
- default: false
- web_config_username:
- type: string
- default: admin
- description: Username to access the Prometheus Web Interface
- web_config_password:
- type: string
- default: admin
- description: Password to access the Prometheus Web Interface
diff --git a/installers/charm/prometheus/icon.svg b/installers/charm/prometheus/icon.svg
deleted file mode 100644
index 5c51f66d..00000000
--- a/installers/charm/prometheus/icon.svg
+++ /dev/null
@@ -1,50 +0,0 @@
-
-
-
-
\ No newline at end of file
diff --git a/installers/charm/prometheus/metadata.yaml b/installers/charm/prometheus/metadata.yaml
deleted file mode 100644
index 932ccc21..00000000
--- a/installers/charm/prometheus/metadata.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-name: osm-prometheus
-summary: OSM Prometheus
-description: |
- A CAAS charm to deploy OSM's Prometheus.
-series:
- - kubernetes
-tags:
- - kubernetes
- - osm
- - prometheus
-min-juju-version: 2.8.0
-deployment:
- type: stateful
- service: cluster
-resources:
- backup-image:
- type: oci-image
- description: Container image to run backup actions
- upstream-source: "ed1000/prometheus-backup:latest"
- image:
- type: oci-image
- description: OSM docker image for Prometheus
- upstream-source: "ubuntu/prometheus:latest"
-provides:
- prometheus:
- interface: prometheus
-storage:
- data:
- type: filesystem
- location: /prometheus
diff --git a/installers/charm/prometheus/requirements-test.txt b/installers/charm/prometheus/requirements-test.txt
deleted file mode 100644
index cf61dd4e..00000000
--- a/installers/charm/prometheus/requirements-test.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-mock==4.0.3
diff --git a/installers/charm/prometheus/requirements.txt b/installers/charm/prometheus/requirements.txt
deleted file mode 100644
index db13e518..00000000
--- a/installers/charm/prometheus/requirements.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-git+https://github.com/charmed-osm/ops-lib-charmed-osm/@master
-requests
-urllib3>1.25.9
-bcrypt
diff --git a/installers/charm/prometheus/src/charm.py b/installers/charm/prometheus/src/charm.py
deleted file mode 100755
index af39a13a..00000000
--- a/installers/charm/prometheus/src/charm.py
+++ /dev/null
@@ -1,298 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-# pylint: disable=E0213
-
-import base64
-from ipaddress import ip_network
-import logging
-from typing import NoReturn, Optional
-from urllib.parse import urlparse
-
-import bcrypt
-from oci_image import OCIImageResource
-from ops.framework import EventBase
-from ops.main import main
-from opslib.osm.charm import CharmedOsmBase
-from opslib.osm.interfaces.prometheus import PrometheusServer
-from opslib.osm.pod import (
- ContainerV3Builder,
- FilesV3Builder,
- IngressResourceV3Builder,
- PodSpecV3Builder,
-)
-from opslib.osm.validator import (
- ModelValidator,
- validator,
-)
-import requests
-
-
-logger = logging.getLogger(__name__)
-
-PORT = 9090
-
-
-class ConfigModel(ModelValidator):
- web_subpath: str
- default_target: str
- max_file_size: int
- site_url: Optional[str]
- cluster_issuer: Optional[str]
- ingress_class: Optional[str]
- ingress_whitelist_source_range: Optional[str]
- tls_secret_name: Optional[str]
- enable_web_admin_api: bool
- image_pull_policy: str
- security_context: bool
- web_config_username: str
- web_config_password: str
-
- @validator("web_subpath")
- def validate_web_subpath(cls, v):
- if len(v) < 1:
- raise ValueError("web-subpath must be a non-empty string")
- return v
-
- @validator("max_file_size")
- def validate_max_file_size(cls, v):
- if v < 0:
- raise ValueError("value must be equal or greater than 0")
- return v
-
- @validator("site_url")
- def validate_site_url(cls, v):
- if v:
- parsed = urlparse(v)
- if not parsed.scheme.startswith("http"):
- raise ValueError("value must start with http")
- return v
-
- @validator("ingress_whitelist_source_range")
- def validate_ingress_whitelist_source_range(cls, v):
- if v:
- ip_network(v)
- return v
-
- @validator("image_pull_policy")
- def validate_image_pull_policy(cls, v):
- values = {
- "always": "Always",
- "ifnotpresent": "IfNotPresent",
- "never": "Never",
- }
- v = v.lower()
- if v not in values.keys():
- raise ValueError("value must be always, ifnotpresent or never")
- return values[v]
-
-
-class PrometheusCharm(CharmedOsmBase):
-
- """Prometheus Charm."""
-
- def __init__(self, *args) -> NoReturn:
- """Prometheus Charm constructor."""
- super().__init__(*args, oci_image="image")
-
- # Registering provided relation events
- self.prometheus = PrometheusServer(self, "prometheus")
- self.framework.observe(
- self.on.prometheus_relation_joined, # pylint: disable=E1101
- self._publish_prometheus_info,
- )
-
- # Registering actions
- self.framework.observe(
- self.on.backup_action, # pylint: disable=E1101
- self._on_backup_action,
- )
-
- def _publish_prometheus_info(self, event: EventBase) -> NoReturn:
- config = ConfigModel(**dict(self.config))
- self.prometheus.publish_info(
- self.app.name,
- PORT,
- user=config.web_config_username,
- password=config.web_config_password,
- )
-
- def _on_backup_action(self, event: EventBase) -> NoReturn:
- url = f"http://{self.model.app.name}:{PORT}/api/v1/admin/tsdb/snapshot"
- result = requests.post(url)
-
- if result.status_code == 200:
- event.set_results({"backup-name": result.json()["name"]})
- else:
- event.fail(f"status-code: {result.status_code}")
-
- def _build_config_file(self, config: ConfigModel):
- files_builder = FilesV3Builder()
- files_builder.add_file(
- "prometheus.yml",
- (
- "global:\n"
- " scrape_interval: 15s\n"
- " evaluation_interval: 15s\n"
- "alerting:\n"
- " alertmanagers:\n"
- " - static_configs:\n"
- " - targets:\n"
- "rule_files:\n"
- "scrape_configs:\n"
- " - job_name: 'prometheus'\n"
- " static_configs:\n"
- f" - targets: [{config.default_target}]\n"
- ),
- )
- return files_builder.build()
-
- def _build_webconfig_file(self):
- files_builder = FilesV3Builder()
- files_builder.add_file("web.yml", "web-config-file", secret=True)
- return files_builder.build()
-
- def build_pod_spec(self, image_info):
- # Validate config
- config = ConfigModel(**dict(self.config))
- # Create Builder for the PodSpec
- pod_spec_builder = PodSpecV3Builder(
- enable_security_context=config.security_context
- )
-
- # Build Backup Container
- backup_image = OCIImageResource(self, "backup-image")
- backup_image_info = backup_image.fetch()
- backup_container_builder = ContainerV3Builder("prom-backup", backup_image_info)
- backup_container = backup_container_builder.build()
-
- # Add backup container to pod spec
- pod_spec_builder.add_container(backup_container)
-
- # Add pod secrets
- prometheus_secret_name = f"{self.app.name}-secret"
- pod_spec_builder.add_secret(
- prometheus_secret_name,
- {
- "web-config-file": (
- "basic_auth_users:\n"
- f" {config.web_config_username}: {self._hash_password(config.web_config_password)}\n"
- )
- },
- )
-
- # Build Container
- container_builder = ContainerV3Builder(
- self.app.name,
- image_info,
- config.image_pull_policy,
- run_as_non_root=config.security_context,
- )
- container_builder.add_port(name=self.app.name, port=PORT)
- token = self._base64_encode(
- f"{config.web_config_username}:{config.web_config_password}"
- )
- container_builder.add_http_readiness_probe(
- "/-/ready",
- PORT,
- initial_delay_seconds=10,
- timeout_seconds=30,
- http_headers=[("Authorization", f"Basic {token}")],
- )
- container_builder.add_http_liveness_probe(
- "/-/healthy",
- PORT,
- initial_delay_seconds=30,
- period_seconds=30,
- http_headers=[("Authorization", f"Basic {token}")],
- )
- command = [
- "/bin/prometheus",
- "--config.file=/etc/prometheus/prometheus.yml",
- "--web.config.file=/etc/prometheus/web-config/web.yml",
- "--storage.tsdb.path=/prometheus",
- "--web.console.libraries=/usr/share/prometheus/console_libraries",
- "--web.console.templates=/usr/share/prometheus/consoles",
- f"--web.route-prefix={config.web_subpath}",
- f"--web.external-url=http://localhost:{PORT}{config.web_subpath}",
- ]
- if config.enable_web_admin_api:
- command.append("--web.enable-admin-api")
- container_builder.add_command(command)
- container_builder.add_volume_config(
- "config", "/etc/prometheus", self._build_config_file(config)
- )
- container_builder.add_volume_config(
- "web-config",
- "/etc/prometheus/web-config",
- self._build_webconfig_file(),
- secret_name=prometheus_secret_name,
- )
- container = container_builder.build()
- # Add container to pod spec
- pod_spec_builder.add_container(container)
- # Add ingress resources to pod spec if site url exists
- if config.site_url:
- parsed = urlparse(config.site_url)
- annotations = {
- "nginx.ingress.kubernetes.io/proxy-body-size": "{}".format(
- str(config.max_file_size) + "m"
- if config.max_file_size > 0
- else config.max_file_size
- )
- }
- if config.ingress_class:
- annotations["kubernetes.io/ingress.class"] = config.ingress_class
- ingress_resource_builder = IngressResourceV3Builder(
- f"{self.app.name}-ingress", annotations
- )
-
- if config.ingress_whitelist_source_range:
- annotations[
- "nginx.ingress.kubernetes.io/whitelist-source-range"
- ] = config.ingress_whitelist_source_range
-
- if config.cluster_issuer:
- annotations["cert-manager.io/cluster-issuer"] = config.cluster_issuer
-
- if parsed.scheme == "https":
- ingress_resource_builder.add_tls(
- [parsed.hostname], config.tls_secret_name
- )
- else:
- annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
-
- ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT)
- ingress_resource = ingress_resource_builder.build()
- pod_spec_builder.add_ingress_resource(ingress_resource)
- return pod_spec_builder.build()
-
- def _hash_password(self, password):
- hashed_password = bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt())
- return hashed_password.decode()
-
- def _base64_encode(self, phrase: str) -> str:
- return base64.b64encode(phrase.encode("utf-8")).decode("utf-8")
-
-
-if __name__ == "__main__":
- main(PrometheusCharm)
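
As context for the charm deleted above: it guards the Prometheus web UI with basic auth by writing a bcrypt hash of the configured password into the web.yml secret (basic_auth_users) and by sending a base64-encoded user:password token in the probe Authorization headers. A self-contained sketch of those two helpers, assuming only the bcrypt package from the charm's requirements; the credentials shown are placeholders.

    import base64

    import bcrypt


    def hash_password(password: str) -> str:
        """Return a bcrypt hash suitable for a Prometheus basic_auth_users entry."""
        return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()).decode()


    def basic_auth_token(username: str, password: str) -> str:
        """Return the value placed after 'Basic ' in an Authorization header."""
        return base64.b64encode(f"{username}:{password}".encode("utf-8")).decode("utf-8")


    # Placeholder credentials, mirroring the charm's default username
    web_yml = "basic_auth_users:\n  admin: {}\n".format(hash_password("admin"))
    probe_header = ("Authorization", "Basic " + basic_auth_token("admin", "admin"))
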
diff --git a/installers/charm/prometheus/src/pod_spec.py b/installers/charm/prometheus/src/pod_spec.py
deleted file mode 100644
index 202114ee..00000000
--- a/installers/charm/prometheus/src/pod_spec.py
+++ /dev/null
@@ -1,380 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-from ipaddress import ip_network
-import logging
-from typing import Any, Dict, List
-from urllib.parse import urlparse
-
-logger = logging.getLogger(__name__)
-
-
-def _validate_max_file_size(max_file_size: int, site_url: str) -> bool:
- """Validate max_file_size.
-
- Args:
- max_file_size (int): maximum file size allowed.
- site_url (str): endpoint url.
-
- Returns:
- bool: True if valid, false otherwise.
- """
- if not site_url:
- return True
-
- parsed = urlparse(site_url)
-
- if not parsed.scheme.startswith("http"):
- return True
-
- if max_file_size is None:
- return False
-
- return max_file_size >= 0
-
-
-def _validate_ip_network(network: str) -> bool:
- """Validate IP network.
-
- Args:
- network (str): IP network range.
-
- Returns:
- bool: True if valid, false otherwise.
- """
- if not network:
- return True
-
- try:
- ip_network(network)
- except ValueError:
- return False
-
- return True
-
-
-def _validate_data(config_data: Dict[str, Any], relation_data: Dict[str, Any]) -> bool:
- """Validates passed information.
-
- Args:
- config_data (Dict[str, Any]): configuration information.
- relation_data (Dict[str, Any]): relation information
-
- Raises:
- ValueError: when config and/or relation data is not valid.
- """
- config_validators = {
- "web_subpath": lambda value, _: isinstance(value, str) and len(value) > 0,
- "default_target": lambda value, _: isinstance(value, str),
- "site_url": lambda value, _: isinstance(value, str)
- if value is not None
- else True,
- "max_file_size": lambda value, values: _validate_max_file_size(
- value, values.get("site_url")
- ),
- "ingress_whitelist_source_range": lambda value, _: _validate_ip_network(value),
- "tls_secret_name": lambda value, _: isinstance(value, str)
- if value is not None
- else True,
- "enable_web_admin_api": lambda value, _: isinstance(value, bool),
- }
- relation_validators = {}
- problems = []
-
- for key, validator in config_validators.items():
- valid = validator(config_data.get(key), config_data)
-
- if not valid:
- problems.append(key)
-
- for key, validator in relation_validators.items():
- valid = validator(relation_data.get(key), relation_data)
-
- if not valid:
- problems.append(key)
-
- if len(problems) > 0:
- raise ValueError("Errors found in: {}".format(", ".join(problems)))
-
- return True
-
-
-def _make_pod_ports(port: int) -> List[Dict[str, Any]]:
- """Generate pod ports details.
-
- Args:
- port (int): port to expose.
-
- Returns:
- List[Dict[str, Any]]: pod port details.
- """
- return [{"name": "prometheus", "containerPort": port, "protocol": "TCP"}]
-
-
-def _make_pod_envconfig(
- config: Dict[str, Any], relation_state: Dict[str, Any]
-) -> Dict[str, Any]:
- """Generate pod environment configuration.
-
- Args:
- config (Dict[str, Any]): configuration information.
- relation_state (Dict[str, Any]): relation state information.
-
- Returns:
- Dict[str, Any]: pod environment configuration.
- """
- envconfig = {}
-
- return envconfig
-
-
-def _make_pod_ingress_resources(
- config: Dict[str, Any], app_name: str, port: int
-) -> List[Dict[str, Any]]:
- """Generate pod ingress resources.
-
- Args:
- config (Dict[str, Any]): configuration information.
- app_name (str): application name.
- port (int): port to expose.
-
- Returns:
- List[Dict[str, Any]]: pod ingress resources.
- """
- site_url = config.get("site_url")
-
- if not site_url:
- return
-
- parsed = urlparse(site_url)
-
- if not parsed.scheme.startswith("http"):
- return
-
- max_file_size = config["max_file_size"]
- ingress_whitelist_source_range = config["ingress_whitelist_source_range"]
-
- annotations = {
- "nginx.ingress.kubernetes.io/proxy-body-size": "{}".format(
- str(max_file_size) + "m" if max_file_size > 0 else max_file_size
- ),
- }
-
- if ingress_whitelist_source_range:
- annotations[
- "nginx.ingress.kubernetes.io/whitelist-source-range"
- ] = ingress_whitelist_source_range
-
- ingress_spec_tls = None
-
- if parsed.scheme == "https":
- ingress_spec_tls = [{"hosts": [parsed.hostname]}]
- tls_secret_name = config["tls_secret_name"]
- if tls_secret_name:
- ingress_spec_tls[0]["secretName"] = tls_secret_name
- else:
- annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
-
- ingress = {
- "name": "{}-ingress".format(app_name),
- "annotations": annotations,
- "spec": {
- "rules": [
- {
- "host": parsed.hostname,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ]
- },
- }
- if ingress_spec_tls:
- ingress["spec"]["tls"] = ingress_spec_tls
-
- return [ingress]
-
-
-def _make_pod_files(config: Dict[str, Any]) -> List[Dict[str, Any]]:
- """Generating ConfigMap information
-
- Args:
- config (Dict[str, Any]): configuration information.
-
- Returns:
- List[Dict[str, Any]]: ConfigMap information.
- """
- files = [
- {
- "name": "config",
- "mountPath": "/etc/prometheus",
- "files": [
- {
- "path": "prometheus.yml",
- "content": (
- "global:\n"
- " scrape_interval: 15s\n"
- " evaluation_interval: 15s\n"
- "alerting:\n"
- " alertmanagers:\n"
- " - static_configs:\n"
- " - targets:\n"
- "rule_files:\n"
- "scrape_configs:\n"
- " - job_name: 'prometheus'\n"
- " static_configs:\n"
- " - targets: [{}]\n".format(config["default_target"])
- ),
- }
- ],
- }
- ]
-
- return files
-
-
-def _make_readiness_probe(port: int) -> Dict[str, Any]:
- """Generate readiness probe.
-
- Args:
- port (int): service port.
-
- Returns:
- Dict[str, Any]: readiness probe.
- """
- return {
- "httpGet": {
- "path": "/-/ready",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "timeoutSeconds": 30,
- }
-
-
-def _make_liveness_probe(port: int) -> Dict[str, Any]:
- """Generate liveness probe.
-
- Args:
- port (int): service port.
-
- Returns:
- Dict[str, Any]: liveness probe.
- """
- return {
- "httpGet": {
- "path": "/-/healthy",
- "port": port,
- },
- "initialDelaySeconds": 30,
- "periodSeconds": 30,
- }
-
-
-def _make_pod_command(config: Dict[str, Any], port: int) -> List[str]:
- """Generate the startup command.
-
- Args:
- config (Dict[str, Any]): Configuration information.
- port (int): port.
-
- Returns:
- List[str]: command to startup the process.
- """
- command = [
- "/bin/prometheus",
- "--config.file=/etc/prometheus/prometheus.yml",
- "--storage.tsdb.path=/prometheus",
- "--web.console.libraries=/usr/share/prometheus/console_libraries",
- "--web.console.templates=/usr/share/prometheus/consoles",
- "--web.route-prefix={}".format(config.get("web_subpath")),
- "--web.external-url=http://localhost:{}{}".format(
- port, config.get("web_subpath")
- ),
- ]
- if config.get("enable_web_admin_api"):
- command.append("--web.enable-admin-api")
- return command
-
-
-def make_pod_spec(
- image_info: Dict[str, str],
- config: Dict[str, Any],
- relation_state: Dict[str, Any],
- app_name: str = "prometheus",
- port: int = 9090,
-) -> Dict[str, Any]:
- """Generate the pod spec information.
-
- Args:
- image_info (Dict[str, str]): Object provided by
- OCIImageResource("image").fetch().
- config (Dict[str, Any]): Configuration information.
- relation_state (Dict[str, Any]): Relation state information.
- app_name (str, optional): Application name. Defaults to "prometheus".
- port (int, optional): Port for the container. Defaults to 9090.
-
- Returns:
- Dict[str, Any]: Pod spec dictionary for the charm.
- """
- if not image_info:
- return None
-
- _validate_data(config, relation_state)
-
- ports = _make_pod_ports(port)
- env_config = _make_pod_envconfig(config, relation_state)
- files = _make_pod_files(config)
- readiness_probe = _make_readiness_probe(port)
- liveness_probe = _make_liveness_probe(port)
- ingress_resources = _make_pod_ingress_resources(config, app_name, port)
- command = _make_pod_command(config, port)
-
- return {
- "version": 3,
- "containers": [
- {
- "name": app_name,
- "imageDetails": image_info,
- "imagePullPolicy": "Always",
- "ports": ports,
- "envConfig": env_config,
- "volumeConfig": files,
- "command": command,
- "kubernetes": {
- "readinessProbe": readiness_probe,
- "livenessProbe": liveness_probe,
- },
- }
- ],
- "kubernetesResources": {
- "ingressResources": ingress_resources or [],
- },
- }
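
The make_pod_spec helper deleted above predates the CharmedOsmBase version of the charm. A minimal invocation, under stated assumptions (placeholder image details, src/ on the import path as in the deleted tox configuration, empty relation state):

    from pod_spec import make_pod_spec

    image_info = {"imagePath": "ubuntu/prometheus:latest"}  # placeholder image details
    config = {
        "web_subpath": "/",
        "default_target": "",
        "site_url": "",  # empty: no ingress resources are generated
        "max_file_size": 0,
        "ingress_whitelist_source_range": "",
        "tls_secret_name": "",
        "enable_web_admin_api": False,
    }
    spec = make_pod_spec(image_info, config, relation_state={})
    assert spec["version"] == 3
    assert spec["containers"][0]["name"] == "prometheus"
    assert spec["kubernetesResources"]["ingressResources"] == []
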
diff --git a/installers/charm/prometheus/tests/__init__.py b/installers/charm/prometheus/tests/__init__.py
deleted file mode 100644
index 446d5cee..00000000
--- a/installers/charm/prometheus/tests/__init__.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-"""Init mocking for unit tests."""
-
-import sys
-
-
-import mock
-
-
-class OCIImageResourceErrorMock(Exception):
- pass
-
-
-sys.path.append("src")
-
-oci_image = mock.MagicMock()
-oci_image.OCIImageResourceError = OCIImageResourceErrorMock
-sys.modules["oci_image"] = oci_image
-sys.modules["oci_image"].OCIImageResource().fetch.return_value = {}
diff --git a/installers/charm/prometheus/tests/test_charm.py b/installers/charm/prometheus/tests/test_charm.py
deleted file mode 100644
index 965400a4..00000000
--- a/installers/charm/prometheus/tests/test_charm.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-import sys
-from typing import NoReturn
-import unittest
-
-from charm import PrometheusCharm
-from ops.model import ActiveStatus
-from ops.testing import Harness
-
-
-class TestCharm(unittest.TestCase):
- """Prometheus Charm unit tests."""
-
- def setUp(self) -> NoReturn:
- """Test setup"""
- self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
- self.harness = Harness(PrometheusCharm)
- self.harness.set_leader(is_leader=True)
- self.harness.begin()
- self.config = {
- "web-subpath": "/",
- "default-target": "",
- "max_file_size": 0,
- "ingress_whitelist_source_range": "",
- "tls_secret_name": "",
- "site_url": "https://prometheus.192.168.100.100.nip.io",
- "cluster_issuer": "vault-issuer",
- "enable_web_admin_api": False,
- "web_config_username": "admin",
- "web_config_password": "1234",
- }
- self.harness.update_config(self.config)
-
- def test_config_changed(
- self,
- ) -> NoReturn:
- """Test ingress resources without HTTP."""
-
- self.harness.charm.on.config_changed.emit()
-
- # Assertions
- self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
-
- def test_config_changed_non_leader(
- self,
- ) -> NoReturn:
- """Test ingress resources without HTTP."""
- self.harness.set_leader(is_leader=False)
- self.harness.charm.on.config_changed.emit()
-
- # Assertions
- self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
-
- def test_publish_prometheus_info(
- self,
- ) -> NoReturn:
- """Test to see if prometheus relation is updated."""
- expected_result = {
- "hostname": self.harness.charm.app.name,
- "port": "9090",
- "user": "admin",
- "password": "1234",
- }
-
- relation_id = self.harness.add_relation("prometheus", "mon")
- self.harness.add_relation_unit(relation_id, "mon/0")
- relation_data = self.harness.get_relation_data(
- relation_id, self.harness.charm.app.name
- )
-
- self.assertDictEqual(expected_result, relation_data)
-
- def test_publish_prometheus_info_non_leader(
- self,
- ) -> NoReturn:
- """Test to see if prometheus relation is updated."""
- expected_result = {}
-
- self.harness.set_leader(is_leader=False)
- relation_id = self.harness.add_relation("prometheus", "mon")
- self.harness.add_relation_unit(relation_id, "mon/0")
- relation_data = self.harness.get_relation_data(
- relation_id, self.harness.charm.app.name
- )
-
- self.assertDictEqual(expected_result, relation_data)
-
-
-if __name__ == "__main__":
- unittest.main()
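The two relation tests above expect the leader to publish `hostname`, `port`, `user`, and `password` in the `prometheus` application relation data, and a non-leader to publish nothing. A hypothetical sketch of a charm-side handler that would satisfy those assertions (the real implementation lives in the deleted `src/charm.py` and may be wired differently):

```python
# Hypothetical charm-side sketch; it assumes a "prometheus" provides endpoint
# in metadata.yaml and the web_config_* options used in the test config above.
from ops.charm import CharmBase, RelationJoinedEvent


class PrometheusInfoProvider(CharmBase):
    """Publishes Prometheus connection details on the "prometheus" relation."""

    def __init__(self, *args):
        super().__init__(*args)
        self.framework.observe(
            self.on["prometheus"].relation_joined, self._publish_prometheus_info
        )

    def _publish_prometheus_info(self, event: RelationJoinedEvent) -> None:
        if not self.unit.is_leader():
            # Only the leader may write application relation data, which is why
            # the non-leader test expects an empty dictionary.
            return
        event.relation.data[self.app].update(
            {
                "hostname": self.app.name,
                "port": "9090",
                "user": self.config["web_config_username"],
                "password": self.config["web_config_password"],
            }
        )
```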
diff --git a/installers/charm/prometheus/tests/test_pod_spec.py b/installers/charm/prometheus/tests/test_pod_spec.py
deleted file mode 100644
index 1adbae64..00000000
--- a/installers/charm/prometheus/tests/test_pod_spec.py
+++ /dev/null
@@ -1,640 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-from typing import NoReturn
-import unittest
-
-import pod_spec
-
-
-class TestPodSpec(unittest.TestCase):
- """Pod spec unit tests."""
-
- def test_make_pod_ports(self) -> NoReturn:
- """Testing make pod ports."""
- port = 9090
-
- expected_result = [
- {
- "name": "prometheus",
- "containerPort": port,
- "protocol": "TCP",
- }
- ]
-
- pod_ports = pod_spec._make_pod_ports(port)
-
- self.assertListEqual(expected_result, pod_ports)
-
- def test_make_pod_envconfig(self) -> NoReturn:
- """Testing make pod envconfig."""
- config = {}
- relation_state = {}
-
- expected_result = {}
-
- pod_envconfig = pod_spec._make_pod_envconfig(config, relation_state)
-
- self.assertDictEqual(expected_result, pod_envconfig)
-
- def test_make_pod_ingress_resources_without_site_url(self) -> NoReturn:
- """Testing make pod ingress resources without site_url."""
- config = {"site_url": ""}
- app_name = "prometheus"
- port = 9090
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertIsNone(pod_ingress_resources)
-
- def test_make_pod_ingress_resources(self) -> NoReturn:
- """Testing make pod ingress resources."""
- config = {
- "site_url": "http://prometheus",
- "max_file_size": 0,
- "ingress_whitelist_source_range": "",
- }
- app_name = "prometheus"
- port = 9090
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {
- "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
- "nginx.ingress.kubernetes.io/ssl-redirect": "false",
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ]
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_pod_ingress_resources_with_whitelist_source_range(self) -> NoReturn:
- """Testing make pod ingress resources with whitelist_source_range."""
- config = {
- "site_url": "http://prometheus",
- "max_file_size": 0,
- "ingress_whitelist_source_range": "0.0.0.0/0",
- }
- app_name = "prometheus"
- port = 9090
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {
- "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
- "nginx.ingress.kubernetes.io/ssl-redirect": "false",
- "nginx.ingress.kubernetes.io/whitelist-source-range": config[
- "ingress_whitelist_source_range"
- ],
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ]
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_pod_ingress_resources_with_https(self) -> NoReturn:
- """Testing make pod ingress resources with HTTPs."""
- config = {
- "site_url": "https://prometheus",
- "max_file_size": 0,
- "ingress_whitelist_source_range": "",
- "tls_secret_name": "",
- }
- app_name = "prometheus"
- port = 9090
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {
- "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ],
- "tls": [{"hosts": [app_name]}],
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_pod_ingress_resources_with_https_tls_secret_name(self) -> NoReturn:
- """Testing make pod ingress resources with HTTPs and TLS secret name."""
- config = {
- "site_url": "https://prometheus",
- "max_file_size": 0,
- "ingress_whitelist_source_range": "",
- "tls_secret_name": "secret_name",
- }
- app_name = "prometheus"
- port = 9090
-
- expected_result = [
- {
- "name": f"{app_name}-ingress",
- "annotations": {
- "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ],
- "tls": [
- {"hosts": [app_name], "secretName": config["tls_secret_name"]}
- ],
- },
- }
- ]
-
- pod_ingress_resources = pod_spec._make_pod_ingress_resources(
- config, app_name, port
- )
-
- self.assertListEqual(expected_result, pod_ingress_resources)
-
- def test_make_pod_files(self) -> NoReturn:
- """Testing make pod files."""
- config = {
- "web_subpath": "/",
- "default_target": "",
- "site_url": "",
- }
-
- expected_result = [
- {
- "name": "config",
- "mountPath": "/etc/prometheus",
- "files": [
- {
- "path": "prometheus.yml",
- "content": (
- "global:\n"
- " scrape_interval: 15s\n"
- " evaluation_interval: 15s\n"
- "alerting:\n"
- " alertmanagers:\n"
- " - static_configs:\n"
- " - targets:\n"
- "rule_files:\n"
- "scrape_configs:\n"
- " - job_name: 'prometheus'\n"
- " static_configs:\n"
- " - targets: [{}]\n".format(config["default_target"])
- ),
- }
- ],
- }
- ]
-
- pod_envconfig = pod_spec._make_pod_files(config)
- self.assertListEqual(expected_result, pod_envconfig)
-
- def test_make_readiness_probe(self) -> NoReturn:
- """Testing make readiness probe."""
- port = 9090
-
- expected_result = {
- "httpGet": {
- "path": "/-/ready",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "timeoutSeconds": 30,
- }
-
- readiness_probe = pod_spec._make_readiness_probe(port)
-
- self.assertDictEqual(expected_result, readiness_probe)
-
- def test_make_liveness_probe(self) -> NoReturn:
- """Testing make liveness probe."""
- port = 9090
-
- expected_result = {
- "httpGet": {
- "path": "/-/healthy",
- "port": port,
- },
- "initialDelaySeconds": 30,
- "periodSeconds": 30,
- }
-
- liveness_probe = pod_spec._make_liveness_probe(port)
-
- self.assertDictEqual(expected_result, liveness_probe)
-
- def test_make_pod_command(self) -> NoReturn:
- """Testing make pod command."""
- port = 9090
- config = {
- "web_subpath": "/",
- "default_target": "",
- "site_url": "",
- }
-
- expected_result = [
- "/bin/prometheus",
- "--config.file=/etc/prometheus/prometheus.yml",
- "--storage.tsdb.path=/prometheus",
- "--web.console.libraries=/usr/share/prometheus/console_libraries",
- "--web.console.templates=/usr/share/prometheus/consoles",
- "--web.route-prefix={}".format(config.get("web_subpath")),
- "--web.external-url=http://localhost:{}{}".format(
- port, config.get("web_subpath")
- ),
- ]
-
- pod_envconfig = pod_spec._make_pod_command(config, port)
-
- self.assertListEqual(expected_result, pod_envconfig)
-
- def test_make_pod_command_with_web_admin_api_enabled(self) -> NoReturn:
- """Testing make pod command."""
- port = 9090
- config = {
- "web_subpath": "/",
- "default_target": "",
- "site_url": "",
- "enable_web_admin_api": True,
- }
-
- expected_result = [
- "/bin/prometheus",
- "--config.file=/etc/prometheus/prometheus.yml",
- "--storage.tsdb.path=/prometheus",
- "--web.console.libraries=/usr/share/prometheus/console_libraries",
- "--web.console.templates=/usr/share/prometheus/consoles",
- "--web.route-prefix={}".format(config.get("web_subpath")),
- "--web.external-url=http://localhost:{}{}".format(
- port, config.get("web_subpath")
- ),
- "--web.enable-admin-api",
- ]
-
- pod_envconfig = pod_spec._make_pod_command(config, port)
-
- self.assertListEqual(expected_result, pod_envconfig)
-
- def test_make_pod_spec(self) -> NoReturn:
- """Testing make pod spec."""
- image_info = {"upstream-source": "ubuntu/prometheus:latest"}
- config = {
- "web_subpath": "/",
- "default_target": "",
- "site_url": "",
- "enable_web_admin_api": False,
- }
- relation_state = {}
- app_name = "prometheus"
- port = 9090
-
- expected_result = {
- "version": 3,
- "containers": [
- {
- "name": app_name,
- "imageDetails": image_info,
- "imagePullPolicy": "Always",
- "ports": [
- {
- "name": app_name,
- "containerPort": port,
- "protocol": "TCP",
- }
- ],
- "envConfig": {},
- "volumeConfig": [
- {
- "name": "config",
- "mountPath": "/etc/prometheus",
- "files": [
- {
- "path": "prometheus.yml",
- "content": (
- "global:\n"
- " scrape_interval: 15s\n"
- " evaluation_interval: 15s\n"
- "alerting:\n"
- " alertmanagers:\n"
- " - static_configs:\n"
- " - targets:\n"
- "rule_files:\n"
- "scrape_configs:\n"
- " - job_name: 'prometheus'\n"
- " static_configs:\n"
- " - targets: [{}]\n".format(
- config.get("default_target")
- )
- ),
- }
- ],
- }
- ],
- "command": [
- "/bin/prometheus",
- "--config.file=/etc/prometheus/prometheus.yml",
- "--storage.tsdb.path=/prometheus",
- "--web.console.libraries=/usr/share/prometheus/console_libraries",
- "--web.console.templates=/usr/share/prometheus/consoles",
- "--web.route-prefix={}".format(config.get("web_subpath")),
- "--web.external-url=http://localhost:{}{}".format(
- port, config.get("web_subpath")
- ),
- ],
- "kubernetes": {
- "readinessProbe": {
- "httpGet": {
- "path": "/-/ready",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "timeoutSeconds": 30,
- },
- "livenessProbe": {
- "httpGet": {
- "path": "/-/healthy",
- "port": port,
- },
- "initialDelaySeconds": 30,
- "periodSeconds": 30,
- },
- },
- }
- ],
- "kubernetesResources": {"ingressResources": []},
- }
-
- spec = pod_spec.make_pod_spec(
- image_info, config, relation_state, app_name, port
- )
-
- self.assertDictEqual(expected_result, spec)
-
- def test_make_pod_spec_with_ingress(self) -> NoReturn:
- """Testing make pod spec."""
- image_info = {"upstream-source": "ubuntu/prometheus:latest"}
- config = {
- "web_subpath": "/",
- "default_target": "",
- "site_url": "https://prometheus",
- "tls_secret_name": "prometheus",
- "max_file_size": 0,
- "ingress_whitelist_source_range": "0.0.0.0/0",
- "enable_web_admin_api": False,
- }
- relation_state = {}
- app_name = "prometheus"
- port = 9090
-
- expected_result = {
- "version": 3,
- "containers": [
- {
- "name": app_name,
- "imageDetails": image_info,
- "imagePullPolicy": "Always",
- "ports": [
- {
- "name": app_name,
- "containerPort": port,
- "protocol": "TCP",
- }
- ],
- "envConfig": {},
- "volumeConfig": [
- {
- "name": "config",
- "mountPath": "/etc/prometheus",
- "files": [
- {
- "path": "prometheus.yml",
- "content": (
- "global:\n"
- " scrape_interval: 15s\n"
- " evaluation_interval: 15s\n"
- "alerting:\n"
- " alertmanagers:\n"
- " - static_configs:\n"
- " - targets:\n"
- "rule_files:\n"
- "scrape_configs:\n"
- " - job_name: 'prometheus'\n"
- " static_configs:\n"
- " - targets: [{}]\n".format(
- config.get("default_target")
- )
- ),
- }
- ],
- }
- ],
- "command": [
- "/bin/prometheus",
- "--config.file=/etc/prometheus/prometheus.yml",
- "--storage.tsdb.path=/prometheus",
- "--web.console.libraries=/usr/share/prometheus/console_libraries",
- "--web.console.templates=/usr/share/prometheus/consoles",
- "--web.route-prefix={}".format(config.get("web_subpath")),
- "--web.external-url=http://localhost:{}{}".format(
- port, config.get("web_subpath")
- ),
- ],
- "kubernetes": {
- "readinessProbe": {
- "httpGet": {
- "path": "/-/ready",
- "port": port,
- },
- "initialDelaySeconds": 10,
- "timeoutSeconds": 30,
- },
- "livenessProbe": {
- "httpGet": {
- "path": "/-/healthy",
- "port": port,
- },
- "initialDelaySeconds": 30,
- "periodSeconds": 30,
- },
- },
- }
- ],
- "kubernetesResources": {
- "ingressResources": [
- {
- "name": "{}-ingress".format(app_name),
- "annotations": {
- "nginx.ingress.kubernetes.io/proxy-body-size": str(
- config.get("max_file_size")
- ),
- "nginx.ingress.kubernetes.io/whitelist-source-range": config.get(
- "ingress_whitelist_source_range"
- ),
- },
- "spec": {
- "rules": [
- {
- "host": app_name,
- "http": {
- "paths": [
- {
- "path": "/",
- "backend": {
- "serviceName": app_name,
- "servicePort": port,
- },
- }
- ]
- },
- }
- ],
- "tls": [
- {
- "hosts": [app_name],
- "secretName": config.get("tls_secret_name"),
- }
- ],
- },
- }
- ],
- },
- }
-
- spec = pod_spec.make_pod_spec(
- image_info, config, relation_state, app_name, port
- )
-
- self.assertDictEqual(expected_result, spec)
-
- def test_make_pod_spec_without_image_info(self) -> NoReturn:
- """Testing make pod spec without image_info."""
- image_info = None
- config = {
- "web_subpath": "/",
- "default_target": "",
- "site_url": "",
- "enable_web_admin_api": False,
- }
- relation_state = {}
- app_name = "prometheus"
- port = 9090
-
- spec = pod_spec.make_pod_spec(
- image_info, config, relation_state, app_name, port
- )
-
- self.assertIsNone(spec)
-
- def test_make_pod_spec_without_config(self) -> NoReturn:
- """Testing make pod spec without config."""
- image_info = {"upstream-source": "ubuntu/prometheus:latest"}
- config = {}
- relation_state = {}
- app_name = "prometheus"
- port = 9090
-
- with self.assertRaises(ValueError):
- pod_spec.make_pod_spec(image_info, config, relation_state, app_name, port)
-
-
-if __name__ == "__main__":
- unittest.main()
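Taken together, these tests pin down the contract of the deleted `src/pod_spec.py` helpers. Purely as an illustration reconstructed from the `expected_result` fixtures above (the deleted module remains the authoritative version), two of the simplest helpers would look roughly like this:

```python
# Illustrative reconstruction from the test fixtures; src/pod_spec.py is the
# authoritative (now deleted) implementation.
from typing import Any, Dict, List


def _make_pod_ports(port: int) -> List[Dict[str, Any]]:
    """Single Prometheus container port, as asserted in test_make_pod_ports."""
    return [{"name": "prometheus", "containerPort": port, "protocol": "TCP"}]


def _make_readiness_probe(port: int) -> Dict[str, Any]:
    """HTTP probe on /-/ready with the delays asserted in the tests."""
    return {
        "httpGet": {"path": "/-/ready", "port": port},
        "initialDelaySeconds": 10,
        "timeoutSeconds": 30,
    }


if __name__ == "__main__":
    assert _make_pod_ports(9090)[0]["containerPort"] == 9090
    assert _make_readiness_probe(9090)["httpGet"]["path"] == "/-/ready"
```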
diff --git a/installers/charm/prometheus/tox.ini b/installers/charm/prometheus/tox.ini
deleted file mode 100644
index 4c7970df..00000000
--- a/installers/charm/prometheus/tox.ini
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-#######################################################################################
-
-[tox]
-envlist = black, cover, flake8, pylint, yamllint, safety
-skipsdist = true
-
-[tox:jenkins]
-toxworkdir = /tmp/.tox
-
-[testenv]
-basepython = python3.8
-setenv = VIRTUAL_ENV={envdir}
- PYTHONDONTWRITEBYTECODE = 1
-deps = -r{toxinidir}/requirements.txt
-
-
-#######################################################################################
-[testenv:black]
-deps = black
-commands =
- black --check --diff src/ tests/
-
-
-#######################################################################################
-[testenv:cover]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- coverage
- nose2
-commands =
- sh -c 'rm -f nosetests.xml'
- coverage erase
- nose2 -C --coverage src
- coverage report --omit='*tests*'
- coverage html -d ./cover --omit='*tests*'
- coverage xml -o coverage.xml --omit=*tests*
-whitelist_externals = sh
-
-
-#######################################################################################
-[testenv:flake8]
-deps = flake8
- flake8-import-order
-commands =
- flake8 src/ tests/
-
-
-#######################################################################################
-[testenv:pylint]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- pylint==2.10.2
-commands =
- pylint -E src/ tests/
-
-
-#######################################################################################
-[testenv:safety]
-setenv =
- LC_ALL=C.UTF-8
- LANG=C.UTF-8
-deps = {[testenv]deps}
- safety
-commands =
- - safety check --full-report
-
-
-#######################################################################################
-[testenv:yamllint]
-deps = {[testenv]deps}
- -r{toxinidir}/requirements-test.txt
- yamllint
-commands = yamllint .
-
-#######################################################################################
-[testenv:build]
-passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
-whitelist_externals =
- charmcraft
- sh
-commands =
- charmcraft pack
- sh -c 'ubuntu_version=20.04; \
- architectures="amd64-aarch64-arm64"; \
- charm_name=`cat metadata.yaml | grep -E "^name: " | cut -f 2 -d " "`; \
- mv $charm_name"_ubuntu-"$ubuntu_version-$architectures.charm $charm_name.charm'
-
-#######################################################################################
-[flake8]
-ignore =
- W291,
- W293,
- W503,
- E123,
- E125,
- E226,
- E241,
-exclude =
- .git,
- __pycache__,
- .tox,
-max-line-length = 120
-show-source = True
-builtins = _
-max-complexity = 10
-import-order-style = google
diff --git a/installers/charm/vca-integrator-operator/.gitignore b/installers/charm/vca-integrator-operator/.gitignore
deleted file mode 100644
index 9ac35bd1..00000000
--- a/installers/charm/vca-integrator-operator/.gitignore
+++ /dev/null
@@ -1,25 +0,0 @@
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
-venv/
-build/
-*.charm
-.tox/
-.coverage
-coverage.xml
-__pycache__/
-*.py[cod]
-.vscode
diff --git a/installers/charm/vca-integrator-operator/.jujuignore b/installers/charm/vca-integrator-operator/.jujuignore
deleted file mode 100644
index 5cee0249..00000000
--- a/installers/charm/vca-integrator-operator/.jujuignore
+++ /dev/null
@@ -1,20 +0,0 @@
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
-
-/venv
-*.py[cod]
-*.charm
diff --git a/installers/charm/vca-integrator-operator/CONTRIBUTING.md b/installers/charm/vca-integrator-operator/CONTRIBUTING.md
deleted file mode 100644
index 32a5d04f..00000000
--- a/installers/charm/vca-integrator-operator/CONTRIBUTING.md
+++ /dev/null
@@ -1,74 +0,0 @@
-
-
-
-# Contributing
-
-## Overview
-
-This document explains the processes and practices recommended for contributing enhancements to
-the OSM VCA Integrator charm.
-
-- If you would like to chat with us about your use-cases or proposed implementation, you can reach
- us at [Canonical Mattermost public channel](https://chat.charmhub.io/charmhub/channels/charm-dev)
- or [Discourse](https://discourse.charmhub.io/).
-- Familiarising yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) library
- will help you a lot when working on new features or bug fixes.
-- All enhancements require review before being merged. Code review typically examines
- - code quality
- - test coverage
-  - user experience for Juju administrators of this charm.
-- Please help us keep branches easy to review by rebasing your pull request branch onto the
-  `main` branch. This also avoids merge commits and creates a linear Git commit history.
-
-## Developing
-
-You can use the environments created by `tox` for development:
-
-```shell
-tox --notest -e unit
-source .tox/unit/bin/activate
-```
-
-### Testing
-
-```shell
-tox -e fmt # update your code according to linting rules
-tox -e lint # code style
-tox -e unit # unit tests
-tox -e integration # integration tests
-tox # runs 'lint' and 'unit' environments
-```
-
-## Build charm
-
-Build the charm in this git repository using:
-
-```shell
-charmcraft pack
-```
-
-### Deploy
-
-```bash
-# Create a model
-juju add-model test-osm-vca-integrator
-# Enable DEBUG logging
-juju model-config logging-config="=INFO;unit=DEBUG"
-# Deploy the charm
-juju deploy ./osm-vca-integrator_ubuntu-22.04-amd64.charm --series jammy
-```
-
diff --git a/installers/charm/vca-integrator-operator/LICENSE b/installers/charm/vca-integrator-operator/LICENSE
deleted file mode 100644
index d6456956..00000000
--- a/installers/charm/vca-integrator-operator/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/installers/charm/vca-integrator-operator/README.md b/installers/charm/vca-integrator-operator/README.md
deleted file mode 100644
index 140af91a..00000000
--- a/installers/charm/vca-integrator-operator/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-
-
-# OSM VCA Integrator Operator
-
-## Description
-
-TODO
-
-## How-to guides
-
-### Deploy and configure
-
-Deploy the OSM VCA Integrator Charm using the Juju command line:
-
-```shell
-$ juju add-model osm-vca-integrator
-$ juju deploy osm-vca-integrator
-$ juju config osm-vca-integrator \
- k8s-cloud=microk8s \
- controllers="`cat ~/.local/share/juju/controllers.yaml`" \
- accounts="`cat ~/.local/share/juju/accounts.yaml`" \
- public-key="`cat ~/.local/share/juju/ssh/juju_id_rsa.pub`"
-```
-
-## Contributing
-
-Please see the [Juju SDK docs](https://juju.is/docs/sdk) for guidelines
-on enhancements to this charm following best practice guidelines, and
-`CONTRIBUTING.md` for developer guidance.
diff --git a/installers/charm/vca-integrator-operator/actions.yaml b/installers/charm/vca-integrator-operator/actions.yaml
deleted file mode 100644
index 65d82b91..00000000
--- a/installers/charm/vca-integrator-operator/actions.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
\ No newline at end of file
diff --git a/installers/charm/vca-integrator-operator/charmcraft.yaml b/installers/charm/vca-integrator-operator/charmcraft.yaml
deleted file mode 100644
index 199e221d..00000000
--- a/installers/charm/vca-integrator-operator/charmcraft.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
-
-type: "charm"
-bases:
- - build-on:
- - name: "ubuntu"
- channel: "22.04"
- run-on:
- - name: "ubuntu"
- channel: "22.04"
-parts:
- charm:
- charm-binary-python-packages: [cryptography, bcrypt]
- build-packages:
- - libffi-dev
diff --git a/installers/charm/vca-integrator-operator/config.yaml b/installers/charm/vca-integrator-operator/config.yaml
deleted file mode 100644
index 97b36cbd..00000000
--- a/installers/charm/vca-integrator-operator/config.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
-
-options:
- accounts:
- description: |
- Content of the .local/share/juju/accounts.yaml file,
- which includes the relevant information about the accounts.
- type: string
- controllers:
- description: |
- Content of the .local/share/juju/controllers.yaml file,
- which includes the relevant information about the controllers.
- type: string
- public-key:
- description: |
- Juju public key, usually located at ~/.local/share/juju/ssh/juju_id_rsa.pub
- type: string
- lxd-cloud:
- description: |
- Name and credentials of the lxd cloud.
- This cloud will be used by N2VC to deploy LXD Proxy Charms.
-
-      The expected input is the following:
-        <cloud-name>[:<credential-name>]
-
-      By default, the <credential-name> will be the same as
-      the <cloud-name>.
- type: string
- k8s-cloud:
- description: |
- Name and credentials of the k8s cloud.
- This cloud will be used by N2VC to deploy K8s Proxy Charms.
-
-      The expected input is the following:
-        <cloud-name>[:<credential-name>]
-
-      By default, the <credential-name> will be the same as
-      the <cloud-name>.
- type: string
- model-configs:
- type: string
- description: |
- Yaml content with all the default model-configs to be sent
- in the relation vca relation.
-
- Example:
- juju config vca-integrator model-configs='
- agent-metadata-url: <>
- agent-stream: ...
- apt-ftp-proxy:
- apt-http-proxy:
- apt-https-proxy:
- apt-mirror:
- apt-no-proxy:
- automatically-retry-hooks:
- backup-dir:
- cloudinit-userdata:
- container-image-metadata-url:
- container-image-stream:
- container-inherit-properties:
- container-networking-method:
- default-series:
- default-space:
- development:
- disable-network-management:
- egress-subnets:
- enable-os-refresh-update:
- enable-os-upgrade:
- fan-config:
- firewall-mode:
- ftp-proxy:
- http-proxy:
- https-proxy:
- ignore-machine-addresses:
- image-metadata-url:
- image-stream:
- juju-ftp-proxy:
- juju-http-proxy:
- juju-https-proxy:
- juju-no-proxy:
- logforward-enabled:
- logging-config:
- lxd-snap-channel:
- max-action-results-age:
- max-action-results-size:
- max-status-history-age:
- max-status-history-size:
- net-bond-reconfigure-delay:
- no-proxy:
- provisioner-harvest-mode:
- proxy-ssh:
- snap-http-proxy:
- snap-https-proxy:
- snap-store-assertions:
- snap-store-proxy:
- snap-store-proxy-url:
- ssl-hostname-verification:
- test-mode:
- transmit-vendor-metrics:
- update-status-hook-interval:
- '
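Both cloud options accept a `<cloud-name>[:<credential-name>]` string, with the credential name defaulting to the cloud name when the optional part is omitted. A small sketch of that parsing rule (the helper name here is illustrative; `src/charm.py` applies the same fallback when building the VCA data):

```python
# Sketch of the "<cloud-name>[:<credential-name>]" parsing rule described above.
from typing import Tuple


def split_cloud_config(value: str) -> Tuple[str, str]:
    parts = value.split(":")
    cloud = parts[0]
    credential = parts[1] if len(parts) > 1 else cloud
    return cloud, credential


assert split_cloud_config("microk8s") == ("microk8s", "microk8s")
assert split_cloud_config("microk8s:osm-cred") == ("microk8s", "osm-cred")
```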
diff --git a/installers/charm/vca-integrator-operator/lib/charms/osm_vca_integrator/v0/vca.py b/installers/charm/vca-integrator-operator/lib/charms/osm_vca_integrator/v0/vca.py
deleted file mode 100644
index 21dac69c..00000000
--- a/installers/charm/vca-integrator-operator/lib/charms/osm_vca_integrator/v0/vca.py
+++ /dev/null
@@ -1,221 +0,0 @@
-# Copyright 2022 Canonical Ltd.
-# See LICENSE file for licensing details.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""VCA Library.
-
-VCA stands for VNF Configuration and Abstraction, and is one of the core components
-of OSM. The Juju Controller is in charge of this role.
-
-This [library](https://juju.is/docs/sdk/libraries) implements both sides of the
-`vca` [interface](https://juju.is/docs/sdk/relations).
-
-The *provider* side of this interface is implemented by the
-[osm-vca-integrator Charmed Operator](https://charmhub.io/osm-vca-integrator).
-
-It helps to integrate with the vca-integrator charm, which provides the data
-needed by the OSM components that need to talk to the VCA.
-
-Any Charmed OSM component that *requires* to talk to the VCA should implement
-the *requirer* side of this interface.
-
-In a nutshell, using this library to implement a Charmed Operator *requiring* VCA data
-would look like
-
-```
-$ charmcraft fetch-lib charms.osm_vca_integrator.v0.vca
-```
-
-`metadata.yaml`:
-
-```
-requires:
- vca:
- interface: osm-vca
-```
-
-`src/charm.py`:
-
-```
-from charms.osm_vca_integrator.v0.vca import VcaData, VcaIntegratorEvents, VcaRequires
-from ops.charm import CharmBase
-
-
-class MyCharm(CharmBase):
-
- on = VcaIntegratorEvents()
-
- def __init__(self, *args):
- super().__init__(*args)
- self.vca = VcaRequires(self)
- self.framework.observe(
- self.on.vca_data_changed,
- self._on_vca_data_changed,
- )
-
- def _on_vca_data_changed(self, event):
- # Get Vca data
- data: VcaData = self.vca.data
- # data.endpoints => "localhost:17070"
-```
-
-You can file bugs
-[here](https://github.com/charmed-osm/osm-vca-integrator-operator/issues)!
-"""
-
-import json
-import logging
-from typing import Any, Dict, Optional
-
-from ops.charm import CharmBase, CharmEvents, RelationChangedEvent
-from ops.framework import EventBase, EventSource, Object
-
-# The unique Charmhub library identifier, never change it
-from ops.model import Relation
-
-# The unique Charmhub library identifier, never change it
-LIBID = "746b36c382984e5c8660b78192d84ef9"
-
-# Increment this major API version when introducing breaking changes
-LIBAPI = 0
-
-# Increment this PATCH version before using `charmcraft publish-lib` or reset
-# to 0 if you are raising the major API version
-LIBPATCH = 3
-
-
-logger = logging.getLogger(__name__)
-
-
-class VcaDataChangedEvent(EventBase):
- """Event emitted whenever there is a change in the vca data."""
-
- def __init__(self, handle):
- super().__init__(handle)
-
-
-class VcaIntegratorEvents(CharmEvents):
- """VCA Integrator events.
-
-    This class defines the events that the VCA Integrator can emit.
-
-    Events:
-        vca_data_changed (VcaDataChangedEvent)
- """
-
- vca_data_changed = EventSource(VcaDataChangedEvent)
-
-
-RELATION_MANDATORY_KEYS = ("endpoints", "user", "secret", "public-key", "cacert", "model-configs")
-
-
-class VcaData:
- """Vca data class."""
-
- def __init__(self, data: Dict[str, Any]) -> None:
-        self.data: Dict[str, Any] = data
- self.endpoints: str = data["endpoints"]
- self.user: str = data["user"]
- self.secret: str = data["secret"]
- self.public_key: str = data["public-key"]
- self.cacert: str = data["cacert"]
- self.lxd_cloud: str = data.get("lxd-cloud")
- self.lxd_credentials: str = data.get("lxd-credentials")
- self.k8s_cloud: str = data.get("k8s-cloud")
- self.k8s_credentials: str = data.get("k8s-credentials")
- self.model_configs: Dict[str, Any] = data.get("model-configs", {})
-
-
-class VcaDataMissingError(Exception):
- """Data missing exception."""
-
-
-class VcaRequires(Object):
- """Requires part of the vca relation.
-
- Attributes:
- endpoint_name: Endpoint name of the charm for the vca relation.
- data: Vca data from the relation.
- """
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "vca") -> None:
- super().__init__(charm, endpoint_name)
- self._charm = charm
- self.endpoint_name = endpoint_name
- self.framework.observe(charm.on[endpoint_name].relation_changed, self._on_relation_changed)
-
- @property
- def data(self) -> Optional[VcaData]:
- """Vca data from the relation."""
- relation: Relation = self.model.get_relation(self.endpoint_name)
- if not relation or relation.app not in relation.data:
- logger.debug("no application data in the event")
- return
-
- relation_data: Dict = dict(relation.data[relation.app])
- relation_data["model-configs"] = json.loads(relation_data.get("model-configs", "{}"))
- try:
- self._validate_relation_data(relation_data)
- return VcaData(relation_data)
- except VcaDataMissingError as e:
- logger.warning(e)
-
- def _on_relation_changed(self, event: RelationChangedEvent) -> None:
- if event.app not in event.relation.data:
- logger.debug("no application data in the event")
- return
-
- relation_data = event.relation.data[event.app]
- try:
- self._validate_relation_data(relation_data)
- self._charm.on.vca_data_changed.emit()
- except VcaDataMissingError as e:
- logger.warning(e)
-
- def _validate_relation_data(self, relation_data: Dict[str, str]) -> None:
- if not all(required_key in relation_data for required_key in RELATION_MANDATORY_KEYS):
- raise VcaDataMissingError("vca data not ready yet")
-
- clouds = ("lxd-cloud", "k8s-cloud")
- if not any(cloud in relation_data for cloud in clouds):
- raise VcaDataMissingError("no clouds defined yet")
-
-
-class VcaProvides(Object):
- """Provides part of the vca relation.
-
- Attributes:
- endpoint_name: Endpoint name of the charm for the vca relation.
- """
-
- def __init__(self, charm: CharmBase, endpoint_name: str = "vca") -> None:
- super().__init__(charm, endpoint_name)
- self.endpoint_name = endpoint_name
-
- def update_vca_data(self, vca_data: VcaData) -> None:
- """Update vca data in relation.
-
- Args:
- vca_data: VcaData object.
- """
- relation: Relation
- for relation in self.model.relations[self.endpoint_name]:
-            if not relation or self.model.app not in relation.data:
-                logger.debug("relation app data not ready yet")
-                continue
-            for key, value in vca_data.data.items():
- if key == "model-configs":
- value = json.dumps(value)
- relation.data[self.model.app][key] = value
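The module docstring above only shows the *requirer* side of the interface. A complementary sketch of the *provider* side, using the `VcaProvides` and `VcaData` classes defined in this library (the payload values are placeholders; the real provider, `src/charm.py`, derives them from its configuration and from the Juju controller):

```python
# Provider-side sketch; it assumes a charm whose metadata.yaml declares a
# "vca" provides endpoint with interface osm-vca.
from charms.osm_vca_integrator.v0.vca import VcaData, VcaProvides
from ops.charm import CharmBase
from ops.main import main


class MyProviderCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        self.vca_provider = VcaProvides(self)  # uses the "vca" endpoint
        self.framework.observe(self.on.config_changed, self._on_config_changed)

    def _on_config_changed(self, _event) -> None:
        # All RELATION_MANDATORY_KEYS plus at least one cloud must be present,
        # otherwise the requirer side discards the data as "not ready yet".
        data = {
            "endpoints": "localhost:17070",
            "user": "admin",
            "secret": "s3cret",
            "public-key": "ssh-rsa AAAA...",
            "cacert": "base64-encoded-cacert",
            "k8s-cloud": "microk8s",
            "model-configs": {"logging-config": "<root>=INFO"},
        }
        # update_vca_data JSON-encodes model-configs before writing it to the
        # application relation data.
        self.vca_provider.update_vca_data(VcaData(data))


if __name__ == "__main__":  # pragma: no cover
    main(MyProviderCharm)
```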
diff --git a/installers/charm/vca-integrator-operator/metadata.yaml b/installers/charm/vca-integrator-operator/metadata.yaml
deleted file mode 100644
index bcc4375e..00000000
--- a/installers/charm/vca-integrator-operator/metadata.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
-
-name: osm-vca-integrator
-display-name: OSM VCA Integrator
-summary: Deploy VCA integrator Operator Charm
-description: |
- This Operator deploys the vca-integrator charm that
- facilitates the integration between OSM charms and
- the VCA (Juju controller).
-maintainers:
- - David Garcia
-
-provides:
- vca:
- interface: osm-vca
diff --git a/installers/charm/vca-integrator-operator/pyproject.toml b/installers/charm/vca-integrator-operator/pyproject.toml
deleted file mode 100644
index 7f5495be..00000000
--- a/installers/charm/vca-integrator-operator/pyproject.toml
+++ /dev/null
@@ -1,52 +0,0 @@
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
-
-# Testing tools configuration
-[tool.coverage.run]
-branch = true
-
-[tool.coverage.report]
-show_missing = true
-
-[tool.pytest.ini_options]
-minversion = "6.0"
-log_cli_level = "INFO"
-
-# Formatting tools configuration
-[tool.black]
-line-length = 99
-target-version = ["py38"]
-
-[tool.isort]
-profile = "black"
-
-# Linting tools configuration
-[tool.flake8]
-max-line-length = 99
-max-doc-length = 99
-max-complexity = 10
-exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
-select = ["E", "W", "F", "C", "N", "R", "D", "H"]
-# Ignore W503, E501 because using black creates errors with this
-# Ignore D107 Missing docstring in __init__
-ignore = ["W503", "E402", "E501", "D107"]
-# D100, D101, D102, D103: Ignore missing docstrings in tests
-per-file-ignores = ["tests/*:D100,D101,D102,D103,D104"]
-docstring-convention = "google"
-
-[tool.bandit]
-tests = ["B201", "B301"]
diff --git a/installers/charm/vca-integrator-operator/requirements-dev.txt b/installers/charm/vca-integrator-operator/requirements-dev.txt
deleted file mode 100644
index 65d82b91..00000000
--- a/installers/charm/vca-integrator-operator/requirements-dev.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
\ No newline at end of file
diff --git a/installers/charm/vca-integrator-operator/requirements.txt b/installers/charm/vca-integrator-operator/requirements.txt
deleted file mode 100644
index 387a2e02..00000000
--- a/installers/charm/vca-integrator-operator/requirements.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
-ops < 2.2
-juju < 3
-pyyaml
diff --git a/installers/charm/vca-integrator-operator/src/charm.py b/installers/charm/vca-integrator-operator/src/charm.py
deleted file mode 100755
index 34cb4f93..00000000
--- a/installers/charm/vca-integrator-operator/src/charm.py
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/env python3
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
-
-"""VcaIntegrator K8s charm module."""
-
-import asyncio
-import base64
-import logging
-import os
-from pathlib import Path
-from typing import Dict, Set
-
-import yaml
-from charms.osm_vca_integrator.v0.vca import VcaData, VcaProvides
-from juju.controller import Controller
-from ops.charm import CharmBase
-from ops.main import main
-from ops.model import ActiveStatus, BlockedStatus, StatusBase
-
-logger = logging.getLogger(__name__)
-
-GO_COOKIES = "/root/.go-cookies"
-JUJU_DATA = os.environ["JUJU_DATA"] = "/root/.local/share/juju"
-JUJU_CONFIGS = {
- "public-key": "ssh/juju_id_rsa.pub",
- "controllers": "controllers.yaml",
- "accounts": "accounts.yaml",
-}
-
-
-class CharmError(Exception):
- """Charm Error Exception."""
-
- def __init__(self, message: str, status_class: StatusBase = BlockedStatus) -> None:
- self.message = message
- self.status_class = status_class
- self.status = status_class(message)
-
-
-class VcaIntegratorCharm(CharmBase):
- """VcaIntegrator K8s Charm operator."""
-
- def __init__(self, *args):
- super().__init__(*args)
- self.vca_provider = VcaProvides(self)
- # Observe charm events
- event_observe_mapping = {
- self.on.config_changed: self._on_config_changed,
- self.on.vca_relation_joined: self._on_config_changed,
- }
- for event, observer in event_observe_mapping.items():
- self.framework.observe(event, observer)
-
- # ---------------------------------------------------------------------------
- # Properties
- # ---------------------------------------------------------------------------
-
- @property
- def clouds_set(self) -> Set:
- """Clouds set in the configuration."""
- clouds_set = set()
- for cloud_config in ["k8s-cloud", "lxd-cloud"]:
- if cloud_name := self.config.get(cloud_config):
- clouds_set.add(cloud_name.split(":")[0])
- return clouds_set
-
- @property
- def vca_data(self) -> VcaData:
- """Get VCA data."""
- return VcaData(self._get_vca_data())
-
- # ---------------------------------------------------------------------------
- # Handlers for Charm Events
- # ---------------------------------------------------------------------------
-
- def _on_config_changed(self, _) -> None:
- """Handler for the config-changed event."""
- # Validate charm configuration
- try:
- self._validate_config()
- self._write_controller_config_files()
- self._check_controller()
- self.vca_provider.update_vca_data(self.vca_data)
- self.unit.status = ActiveStatus()
- except CharmError as e:
- self.unit.status = e.status
-
- # ---------------------------------------------------------------------------
- # Validation and configuration
- # ---------------------------------------------------------------------------
-
- def _validate_config(self) -> None:
- """Validate charm configuration.
-
- Raises:
-            CharmError: if charm configuration is invalid.
- """
- # Check mandatory fields
- for mandatory_field in [
- "controllers",
- "accounts",
- "public-key",
- ]:
- if not self.config.get(mandatory_field):
- raise CharmError(f'missing config: "{mandatory_field}"')
- # Check if any clouds are set
- if not self.clouds_set:
- raise CharmError("no clouds set")
-
- if self.config.get("model-configs"):
- try:
- yaml.safe_load(self.config["model-configs"])
- except Exception:
- raise CharmError("invalid yaml format for model-configs")
-
- def _write_controller_config_files(self) -> None:
- Path(f"{JUJU_DATA}/ssh").mkdir(parents=True, exist_ok=True)
- go_cookies = Path(GO_COOKIES)
- if not go_cookies.is_file():
- go_cookies.write_text(data="[]")
- for config, path in JUJU_CONFIGS.items():
- Path(f"{JUJU_DATA}/{path}").expanduser().write_text(self.config[config])
-
- def _check_controller(self):
- loop = asyncio.get_event_loop()
- # Check controller connectivity
- loop.run_until_complete(self._check_controller_connectivity())
- # Check clouds exist in controller
- loop.run_until_complete(self._check_clouds_in_controller())
-
- async def _check_controller_connectivity(self):
- controller = Controller()
- await controller.connect()
- await controller.disconnect()
-
- async def _check_clouds_in_controller(self):
- controller = Controller()
- await controller.connect()
- try:
- controller_clouds = await controller.clouds()
- for cloud in self.clouds_set:
- if f"cloud-{cloud}" not in controller_clouds.clouds:
- raise CharmError(f"Cloud {cloud} does not exist in the controller")
- finally:
- await controller.disconnect()
-
- def _get_vca_data(self) -> Dict[str, str]:
- loop = asyncio.get_event_loop()
- data_from_config = self._get_vca_data_from_config()
- coro_data_from_controller = loop.run_until_complete(self._get_vca_data_from_controller())
- vca_data = {**data_from_config, **coro_data_from_controller}
- logger.debug(f"vca data={vca_data}")
- return vca_data
-
- def _get_vca_data_from_config(self) -> Dict[str, str]:
- data = {"public-key": self.config["public-key"]}
- if self.config.get("lxd-cloud"):
- lxd_cloud_parts = self.config["lxd-cloud"].split(":")
- data.update(
- {
- "lxd-cloud": lxd_cloud_parts[0],
- "lxd-credentials": lxd_cloud_parts[1]
- if len(lxd_cloud_parts) > 1
- else lxd_cloud_parts[0],
- }
- )
- if self.config.get("k8s-cloud"):
- k8s_cloud_parts = self.config["k8s-cloud"].split(":")
- data.update(
- {
- "k8s-cloud": k8s_cloud_parts[0],
- "k8s-credentials": k8s_cloud_parts[1]
- if len(k8s_cloud_parts) > 1
- else k8s_cloud_parts[0],
- }
- )
- if self.config.get("model-configs"):
- data["model-configs"] = yaml.safe_load(self.config["model-configs"])
-
- return data
-
- async def _get_vca_data_from_controller(self) -> Dict[str, str]:
- controller = Controller()
- await controller.connect()
- try:
- connection = controller._connector._connection
- return {
- "endpoints": ",".join(await controller.api_endpoints),
- "user": connection.username,
- "secret": connection.password,
- "cacert": base64.b64encode(connection.cacert.encode("utf-8")).decode("utf-8"),
- }
- finally:
- await controller.disconnect()
-
-
-if __name__ == "__main__": # pragma: no cover
- main(VcaIntegratorCharm)
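
For reference, the `k8s-cloud` and `lxd-cloud` options consumed by the deleted charm above take a `cloud[:credential]` string. The following minimal, standalone sketch (not part of the deleted tree; the function name and example values are illustrative only) mirrors the `split(":")` handling shown in `clouds_set` and `_get_vca_data_from_config`:

```python
# Standalone illustration of the "cloud[:credential]" parsing used by the charm above.
from typing import Dict


def parse_cloud_config(value: str, prefix: str) -> Dict[str, str]:
    """Split a "cloud[:credential]" string the same way _get_vca_data_from_config does."""
    parts = value.split(":")
    cloud = parts[0]
    # When no credential name is given, the charm falls back to the cloud name.
    credential = parts[1] if len(parts) > 1 else cloud
    return {f"{prefix}-cloud": cloud, f"{prefix}-credentials": credential}


if __name__ == "__main__":
    print(parse_cloud_config("lxd-cloud:lxd-cloud", "lxd"))
    print(parse_cloud_config("microk8s", "k8s"))
```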
diff --git a/installers/charm/vca-integrator-operator/tests/integration/test_charm.py b/installers/charm/vca-integrator-operator/tests/integration/test_charm.py
deleted file mode 100644
index 8d69e7b0..00000000
--- a/installers/charm/vca-integrator-operator/tests/integration/test_charm.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env python3
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
-
-import asyncio
-import logging
-import shlex
-from pathlib import Path
-
-import pytest
-import yaml
-from pytest_operator.plugin import OpsTest
-
-logger = logging.getLogger(__name__)
-
-METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
-VCA_APP = "osm-vca"
-
-LCM_CHARM = "osm-lcm"
-LCM_APP = "lcm"
-KAFKA_CHARM = "kafka-k8s"
-KAFKA_APP = "kafka"
-MONGO_DB_CHARM = "mongodb-k8s"
-MONGO_DB_APP = "mongodb"
-RO_CHARM = "osm-ro"
-RO_APP = "ro"
-ZOOKEEPER_CHARM = "zookeeper-k8s"
-ZOOKEEPER_APP = "zookeeper"
-LCM_APPS = [KAFKA_APP, MONGO_DB_APP, ZOOKEEPER_APP, RO_APP, LCM_APP]
-MON_CHARM = "osm-mon"
-MON_APP = "mon"
-KEYSTONE_CHARM = "osm-keystone"
-KEYSTONE_APP = "keystone"
-MARIADB_CHARM = "charmed-osm-mariadb-k8s"
-MARIADB_APP = "mariadb"
-PROMETHEUS_CHARM = "osm-prometheus"
-PROMETHEUS_APP = "prometheus"
-MON_APPS = [
- KAFKA_APP,
- ZOOKEEPER_APP,
- KEYSTONE_APP,
- MONGO_DB_APP,
- MARIADB_APP,
- PROMETHEUS_APP,
- MON_APP,
-]
-
-
-@pytest.mark.abort_on_fail
-async def test_build_and_deploy(ops_test: OpsTest):
- """Build the charm osm-vca-integrator-k8s and deploy it together with related charms.
-
- Assert on the unit status before any relations/configurations take place.
- """
- charm = await ops_test.build_charm(".")
- await ops_test.model.deploy(charm, application_name=VCA_APP, series="jammy")
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=[VCA_APP],
- status="blocked",
- )
- assert ops_test.model.applications[VCA_APP].units[0].workload_status == "blocked"
-
-
-@pytest.mark.abort_on_fail
-async def test_vca_configuration(ops_test: OpsTest):
- controllers = (Path.home() / ".local/share/juju/controllers.yaml").read_text()
- accounts = (Path.home() / ".local/share/juju/accounts.yaml").read_text()
- public_key = (Path.home() / ".local/share/juju/ssh/juju_id_rsa.pub").read_text()
- await ops_test.model.applications[VCA_APP].set_config(
- {
- "controllers": controllers,
- "accounts": accounts,
- "public-key": public_key,
- "k8s-cloud": "microk8s",
- }
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=[VCA_APP],
- status="active",
- )
-
-
-@pytest.mark.abort_on_fail
-async def test_vca_integration_lcm(ops_test: OpsTest):
- lcm_deploy_cmd = f"juju deploy {LCM_CHARM} {LCM_APP} --resource lcm-image=opensourcemano/lcm:testing-daily --channel=latest/beta --series=jammy"
- ro_deploy_cmd = f"juju deploy {RO_CHARM} {RO_APP} --resource ro-image=opensourcemano/ro:testing-daily --channel=latest/beta --series=jammy"
-
- await asyncio.gather(
- # LCM and RO charms have to be deployed differently since
- # bug https://github.com/juju/python-libjuju/pull/820
- # fails to parse assumes
- ops_test.run(*shlex.split(lcm_deploy_cmd), check=True),
- ops_test.run(*shlex.split(ro_deploy_cmd), check=True),
- ops_test.model.deploy(KAFKA_CHARM, application_name=KAFKA_APP, channel="stable"),
- ops_test.model.deploy(MONGO_DB_CHARM, application_name=MONGO_DB_APP, channel="5/edge"),
- ops_test.model.deploy(ZOOKEEPER_CHARM, application_name=ZOOKEEPER_APP, channel="stable"),
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=LCM_APPS,
- )
- # wait for MongoDB to be active before relating RO to it
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(apps=[MONGO_DB_APP], status="active")
- logger.info("Adding relations")
- await ops_test.model.add_relation(KAFKA_APP, ZOOKEEPER_APP)
- await ops_test.model.add_relation(
- "{}:mongodb".format(RO_APP), "{}:database".format(MONGO_DB_APP)
- )
- await ops_test.model.add_relation(RO_APP, KAFKA_APP)
- # LCM specific
- await ops_test.model.add_relation(
- "{}:mongodb".format(LCM_APP), "{}:database".format(MONGO_DB_APP)
- )
- await ops_test.model.add_relation(LCM_APP, KAFKA_APP)
- await ops_test.model.add_relation(LCM_APP, RO_APP)
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=LCM_APPS,
- status="active",
- )
-
- logger.info("Adding relation VCA LCM")
- await ops_test.model.add_relation(VCA_APP, LCM_APP)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=[VCA_APP, LCM_APP],
- status="active",
- )
-
-
-@pytest.mark.abort_on_fail
-async def test_vca_integration_mon(ops_test: OpsTest):
- keystone_image = "opensourcemano/keystone:testing-daily"
- keystone_deploy_cmd = f"juju deploy {KEYSTONE_CHARM} {KEYSTONE_APP} --resource keystone-image={keystone_image} --channel=latest/beta --series jammy"
- mon_deploy_cmd = f"juju deploy {MON_CHARM} {MON_APP} --resource mon-image=opensourcemano/mon:testing-daily --channel=latest/beta --series=jammy"
- await asyncio.gather(
- # MON charm has to be deployed differently since
- # bug https://github.com/juju/python-libjuju/issues/820
- # fails to parse assumes
- ops_test.run(*shlex.split(mon_deploy_cmd), check=True),
- ops_test.model.deploy(MARIADB_CHARM, application_name=MARIADB_APP, channel="stable"),
- ops_test.model.deploy(PROMETHEUS_CHARM, application_name=PROMETHEUS_APP, channel="stable"),
- # Keystone charm has to be deployed differently since
- # bug https://github.com/juju/python-libjuju/issues/766
- # prevents setting correctly the resources
- ops_test.run(*shlex.split(keystone_deploy_cmd), check=True),
- )
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=MON_APPS,
- )
-
- logger.info("Adding relations")
- await ops_test.model.add_relation(MARIADB_APP, KEYSTONE_APP)
- # MON specific
- await ops_test.model.add_relation(
- "{}:mongodb".format(MON_APP), "{}:database".format(MONGO_DB_APP)
- )
- await ops_test.model.add_relation(MON_APP, KAFKA_APP)
- await ops_test.model.add_relation(MON_APP, KEYSTONE_APP)
- await ops_test.model.add_relation(MON_APP, PROMETHEUS_APP)
-
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=MON_APPS,
- status="active",
- )
-
- logger.info("Adding relation VCA MON")
- await ops_test.model.add_relation(VCA_APP, MON_APP)
- async with ops_test.fast_forward():
- await ops_test.model.wait_for_idle(
- apps=[VCA_APP, MON_APP],
- status="active",
- )
diff --git a/installers/charm/vca-integrator-operator/tests/unit/test_charm.py b/installers/charm/vca-integrator-operator/tests/unit/test_charm.py
deleted file mode 100644
index 5018675d..00000000
--- a/installers/charm/vca-integrator-operator/tests/unit/test_charm.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
-
-import pytest
-from ops.testing import Harness
-from pytest_mock import MockerFixture
-
-from charm import VcaIntegratorCharm
-
-
-@pytest.fixture
-def harness():
- osm_vca_integrator_harness = Harness(VcaIntegratorCharm)
- osm_vca_integrator_harness.begin()
- yield osm_vca_integrator_harness
- osm_vca_integrator_harness.cleanup()
-
-
-def test_on_config_changed(mocker: MockerFixture, harness: Harness):
- pass
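
The deleted unit test above is an empty stub. One possible way to flesh it out (a sketch, not part of the original repo) uses the same `Harness` the fixture already builds and assumes the mandatory options default to empty, so the config-changed hook should leave the unit blocked on the "missing config" validation in `charm.py` — consistent with the integration test above, which expects a freshly deployed unit to be `blocked`:

```python
# Hypothetical test body, assuming empty config defaults for the mandatory options.
from ops.model import BlockedStatus
from ops.testing import Harness

from charm import VcaIntegratorCharm


def test_missing_config_blocks_unit():
    harness = Harness(VcaIntegratorCharm)
    harness.begin()
    try:
        # update_config() emits config-changed once begin() has run.
        harness.update_config({})
        assert isinstance(harness.charm.unit.status, BlockedStatus)
    finally:
        harness.cleanup()
```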
diff --git a/installers/charm/vca-integrator-operator/tox.ini b/installers/charm/vca-integrator-operator/tox.ini
deleted file mode 100644
index a8eb8bc9..00000000
--- a/installers/charm/vca-integrator-operator/tox.ini
+++ /dev/null
@@ -1,106 +0,0 @@
-#######################################################################################
-# Copyright ETSI Contributors and Others.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#######################################################################################
-
-[tox]
-skipsdist=True
-skip_missing_interpreters = True
-envlist = lint, unit
-
-[vars]
-src_path = {toxinidir}/src/
-tst_path = {toxinidir}/tests/
-lib_path = {toxinidir}/lib/charms/osm_vca_integrator
-all_path = {[vars]src_path} {[vars]tst_path} {[vars]lib_path}
-
-[testenv]
-basepython = python3.8
-setenv =
- PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path}
- PYTHONBREAKPOINT=ipdb.set_trace
- PY_COLORS=1
-passenv =
- PYTHONPATH
- CHARM_BUILD_DIR
- MODEL_SETTINGS
-
-[testenv:fmt]
-description = Apply coding style standards to code
-deps =
- black
- isort
-commands =
- isort {[vars]all_path}
- black {[vars]all_path}
-
-[testenv:lint]
-description = Check code against coding style standards
-deps =
- black
- flake8
- flake8-docstrings
- flake8-builtins
- pylint
- pyproject-flake8
- pep8-naming
- isort
- codespell
- yamllint
- -r{toxinidir}/requirements.txt
-commands =
- codespell {[vars]lib_path}
- codespell {toxinidir} --skip {toxinidir}/.git --skip {toxinidir}/.tox \
- --skip {toxinidir}/build --skip {toxinidir}/lib --skip {toxinidir}/venv \
- --skip {toxinidir}/.mypy_cache --skip {toxinidir}/icon.svg
- pylint -E {[vars]src_path}
- # pflake8 wrapper supports config from pyproject.toml
- pflake8 {[vars]all_path}
- isort --check-only --diff {[vars]all_path}
- black --check --diff {[vars]all_path}
-
-[testenv:unit]
-description = Run unit tests
-deps =
- pytest
- pytest-mock
- coverage[toml]
- -r{toxinidir}/requirements.txt
-commands =
- coverage run --source={[vars]src_path},{[vars]lib_path} \
- -m pytest --ignore={[vars]tst_path}integration -v --tb native -s {posargs}
- coverage report
- coverage xml
-
-[testenv:security]
-description = Run security tests
-deps =
- bandit
- safety
-commands =
- bandit -r {[vars]src_path}
- bandit -r {[vars]lib_path}
- - safety check
-
-[testenv:integration]
-description = Run integration tests
-deps =
- pytest
- juju<3
- pytest-operator
- -r{toxinidir}/requirements.txt
- -r{toxinidir}/requirements-dev.txt
-commands =
- pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} --cloud microk8s
diff --git a/installers/charm/zookeeper-k8s/.gitignore b/installers/charm/zookeeper-k8s/.gitignore
deleted file mode 100644
index 712eb963..00000000
--- a/installers/charm/zookeeper-k8s/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-release/
-__pycache__
-.tox
diff --git a/installers/charm/zookeeper-k8s/.yamllint.yaml b/installers/charm/zookeeper-k8s/.yamllint.yaml
deleted file mode 100644
index 21b95b5b..00000000
--- a/installers/charm/zookeeper-k8s/.yamllint.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
----
-
-extends: default
-rules:
- line-length: disable
-yaml-files:
- - '*.yaml'
- - '*.yml'
- - '.yamllint'
-ignore: |
- reactive/
- .tox
- release/
diff --git a/installers/charm/zookeeper-k8s/README.md b/installers/charm/zookeeper-k8s/README.md
deleted file mode 100755
index 442fbb23..00000000
--- a/installers/charm/zookeeper-k8s/README.md
+++ /dev/null
@@ -1,101 +0,0 @@
-
-
-# Overview
-
-Zookeeper for Juju CAAS
-
-
-## Testing
-
-The tests of this charm are run with tox and Zaza.
-
-
-
-### Prepare environment
-
-The machine on which the tests are run needs access to a Juju Kubernetes controller. The easiest way to set this up is to run the following commands:
-
-```
-sudo apt install tox -y
-sudo snap install microk8s --classic
-sudo snap install juju
-
-microk8s.status --wait-ready
-microk8s.enable storage dashboard dns
-
-juju bootstrap microk8s k8s-cloud
-```
-
-If /usr/bin/python does not exist, you will probably need to create the symlink:
-```
-sudo ln -s /usr/bin/python3 /usr/bin/python
-```
-
-### Build Charm
-
-**Download dependencies:**
-```
-mkdir -p ~/charm/layers ~/charm/builds
-cd ~/charm/layers
-git clone https://git.launchpad.net/charm-k8s-zookeeper zookeeper-k8s
-git clone https://git.launchpad.net/charm-osm-common osm-common
-```
-
-**Charm structure:**
-```
-├── config.yaml
-├── icon.svg
-├── layer.yaml
-├── metadata.yaml
-├── reactive
-│   ├── spec_template.yaml
-│   └── zookeeper.py
-├── README.md
-├── test-requirements.txt
-├── tests
-│   ├── basic_deployment.py
-│   ├── bundles
-│   │   ├── zookeeper-ha.yaml
-│   │   └── zookeeper.yaml
-│   └── tests.yaml
-└── tox.ini
-```
-
-**Setup environment variables:**
-
-```
-export CHARM_LAYERS_DIR=~/charm/layers
-export CHARM_BUILD_DIR=~/charm/builds
-```
-
-**Build:**
-```
-charm build ~/charm/layers/zookeeper-k8s
-mkdir ~/charm/layers/zookeeper-k8s/tests/build/
-mv ~/charm/builds/zookeeper-k8s ~/charm/layers/zookeeper-k8s/tests/build/
-```
-
-### Test charm with Tox
-
-```
-cd ~/charm/layers/zookeeper-k8s
-tox -e func
-```
\ No newline at end of file
diff --git a/installers/charm/zookeeper-k8s/config.yaml b/installers/charm/zookeeper-k8s/config.yaml
deleted file mode 100755
index fe04908e..00000000
--- a/installers/charm/zookeeper-k8s/config.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-options:
- client-port:
- description: Zookeeper client port
- type: int
- default: 2181
- server-port:
- description: Zookeeper server port
- type: int
- default: 2888
- leader-election-port:
- description: Zookeeper leader-election port
- type: int
- default: 3888
- zookeeper-units:
-    description: Number of Zookeeper units
- type: int
- default: 1
- image:
- description: Zookeeper image to use
- type: string
- default: rocks.canonical.com:443/k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10
diff --git a/installers/charm/zookeeper-k8s/icon.svg b/installers/charm/zookeeper-k8s/icon.svg
deleted file mode 100644
index 0185a7e1..00000000
--- a/installers/charm/zookeeper-k8s/icon.svg
+++ /dev/null
@@ -1,38 +0,0 @@
-
-
\ No newline at end of file
diff --git a/installers/charm/zookeeper-k8s/layer.yaml b/installers/charm/zookeeper-k8s/layer.yaml
deleted file mode 100644
index 88e0fc0f..00000000
--- a/installers/charm/zookeeper-k8s/layer.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-includes:
- - "layer:caas-base"
- - 'layer:osm-common'
- - 'layer:status'
- - 'layer:leadership'
- - "interface:zookeeper"
-
-repo: https://code.launchpad.net/osm-k8s-bundle
diff --git a/installers/charm/zookeeper-k8s/metadata.yaml b/installers/charm/zookeeper-k8s/metadata.yaml
deleted file mode 100755
index 59128bc3..00000000
--- a/installers/charm/zookeeper-k8s/metadata.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-name: zookeeper-k8s
-summary: "zookeeper charm for Kubernetes."
-maintainers:
- - "SolutionsQA "
-description: |
- A CAAS charm to deploy zookeeper.
-tags:
- - "application"
-series:
- - kubernetes
-provides:
- zookeeper:
- interface: zookeeper
-storage:
- database:
- type: filesystem
- location: /var/lib/zookeeper
-deployment:
- type: stateful
- service: cluster
diff --git a/installers/charm/zookeeper-k8s/reactive/spec_template.yaml b/installers/charm/zookeeper-k8s/reactive/spec_template.yaml
deleted file mode 100644
index 2dd450a8..00000000
--- a/installers/charm/zookeeper-k8s/reactive/spec_template.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-version: 2
-containers:
- - name: %(name)s
- image: %(docker_image_path)s
- kubernetes:
- readinessProbe:
- tcpSocket:
- port: %(client-port)s
- initialDelaySeconds: 10
- timeoutSeconds: 5
- failureThreshold: 6
- successThreshold: 1
- livenessProbe:
- tcpSocket:
- port: %(client-port)s
- initialDelaySeconds: 20
- ports:
- - containerPort: %(client-port)s
- name: client
- - containerPort: %(server-port)s
- name: server
- - containerPort: %(leader-election-port)s
- name: leader-election
- config:
- ALLOW_ANONYMOUS_LOGIN: 'yes'
- command:
- - sh
- - -c
- - "start-zookeeper \
- --servers=%(zookeeper-units)s \
- --data_dir=/var/lib/zookeeper/data \
- --data_log_dir=/var/lib/zookeeper/data/log \
- --conf_dir=/opt/zookeeper/conf \
- --client_port=%(client-port)s \
- --election_port=%(leader-election-port)s \
- --server_port=%(server-port)s \
- --tick_time=2000 \
- --init_limit=10 \
- --sync_limit=5 \
- --heap=512M \
- --max_client_cnxns=60 \
- --snap_retain_count=3 \
- --purge_interval=12 \
- --max_session_timeout=40000 \
- --min_session_timeout=4000 \
- --log_level=INFO"
- # readinessProbe:
- # exec:
- # command:
- # - sh
- # - -c
- # - "zookeeper-ready 2181"
- # initialDelaySeconds: 10
- # timeoutSeconds: 5
- # failureThreshold: 6
- # successThreshold: 1
- # livenessProbe:
- # exec:
- # command:
- # - sh
- # - -c
- # - "zookeeper-ready 2181"
- # initialDelaySeconds: 20
diff --git a/installers/charm/zookeeper-k8s/reactive/zookeeper.py b/installers/charm/zookeeper-k8s/reactive/zookeeper.py
deleted file mode 100644
index 198e2076..00000000
--- a/installers/charm/zookeeper-k8s/reactive/zookeeper.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-from charms import layer
-from charms.layer.caas_base import pod_spec_set
-from charms.reactive import endpoint_from_flag
-from charms.reactive import when, when_not, hook
-from charms.reactive.flags import set_flag, clear_flag
-from charmhelpers.core.hookenv import (
- log,
- metadata,
- config,
-)
-
-from charms.osm.k8s import is_pod_up, get_service_ip
-
-
-@hook("upgrade-charm")
-@when("leadership.is_leader")
-def upgrade():
- clear_flag("zookeeper-k8s.configured")
-
-
-@when("config.changed")
-@when("leadership.is_leader")
-def config_changed():
- clear_flag("zookeeper-k8s.configured")
-
-
-@when_not("zookeeper-k8s.configured")
-@when("leadership.is_leader")
-def configure():
- layer.status.maintenance("Configuring zookeeper-k8s container")
- try:
- spec = make_pod_spec()
- log("set pod spec:\n{}".format(spec))
- pod_spec_set(spec)
- set_flag("zookeeper-k8s.configured")
-
- except Exception as e:
- layer.status.blocked("k8s spec failed to deploy: {}".format(e))
-
-
-@when("zookeeper-k8s.configured")
-def non_leader():
- layer.status.active("ready")
-
-
-@when_not("leadership.is_leader")
-def non_leaders_active():
- layer.status.active("ready")
-
-
-@when("zookeeper.joined")
-@when("zookeeper-k8s.configured")
-def send_config():
- layer.status.maintenance("Sending Zookeeper configuration")
- if not is_pod_up("zookeeper"):
- log("The pod is not ready.")
- return
-
- zookeeper = endpoint_from_flag("zookeeper.joined")
- if zookeeper:
- service_ip = get_service_ip("zookeeper")
- if service_ip:
- zookeeper.send_connection(
- get_zookeeper_client_port(), get_zookeeper_client_port(), service_ip,
- )
- layer.status.active("ready")
-
-
-def make_pod_spec():
- """Make pod specification for Kubernetes
-
- Returns:
- pod_spec: Pod specification for Kubernetes
- """
- with open("reactive/spec_template.yaml") as spec_file:
- pod_spec_template = spec_file.read()
-
- md = metadata()
- cfg = config()
- data = {"name": md.get("name"), "docker_image_path": cfg.get("image")}
- data.update(cfg)
- return pod_spec_template % data
-
-
-def get_zookeeper_client_port():
- """Returns Zookeeper port"""
- cfg = config()
- return cfg.get("client-port")
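
For clarity (this snippet is not part of the deleted layer): `make_pod_spec()` above fills the placeholders in `spec_template.yaml` with old-style `%`-formatting, which is what allows hyphenated keys such as `%(client-port)s` that `str.format()` cannot express. A tiny self-contained illustration:

```python
# Illustration of the %-formatting substitution performed by make_pod_spec().
template = "ports:\n  - containerPort: %(client-port)s\n    name: client\n"
data = {"name": "zookeeper-k8s", "docker_image_path": "example/image:tag", "client-port": 2181}
print(template % data)
# ports:
#   - containerPort: 2181
#     name: client
```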
diff --git a/installers/charm/zookeeper-k8s/test-requirements.txt b/installers/charm/zookeeper-k8s/test-requirements.txt
deleted file mode 100644
index 25bd2f90..00000000
--- a/installers/charm/zookeeper-k8s/test-requirements.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-git+https://github.com/davigar15/zaza.git#egg=zaza
-git+https://github.com/python-zk/kazoo
diff --git a/installers/charm/zookeeper-k8s/tests/basic_deployment.py b/installers/charm/zookeeper-k8s/tests/basic_deployment.py
deleted file mode 100644
index f24112e8..00000000
--- a/installers/charm/zookeeper-k8s/tests/basic_deployment.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-import unittest
-import zaza.model as model
-from kazoo.client import KazooClient
-
-
-def get_zookeeper_uri():
- zookeeper_uri = ""
- zookeeper_units = model.get_status().applications["zookeeper-k8s"]["units"]
- for i, unit_name in enumerate(zookeeper_units.keys()):
- if i:
- zookeeper_uri += ","
- unit_ip = zookeeper_units[unit_name]["address"]
- unit_port = 2181
- zookeeper_uri += "{}:{}".format(unit_ip, unit_port)
-
- return zookeeper_uri
-
-
-class BasicDeployment(unittest.TestCase):
- def test_get_zookeeper_uri(self):
- get_zookeeper_uri()
-
- def test_zookeeper_connection(self):
- zookeeper_uri = get_zookeeper_uri()
- zk = KazooClient(zookeeper_uri)
- self.assertEqual(zk.state, "LOST")
- zk.start()
- self.assertEqual(zk.state, "CONNECTED")
- zk.stop()
- self.assertEqual(zk.state, "LOST")
-
- def test_zookeeper_create_node(self):
- zookeeper_uri = get_zookeeper_uri()
- zk = KazooClient(hosts=zookeeper_uri, read_only=True)
- zk.start()
-
- zk.ensure_path("/create/new")
- self.assertTrue(zk.exists("/create/new"))
-
- zk.create("/create/new/node", b"a value")
- self.assertTrue(zk.exists("/create/new/node"))
-
- zk.stop()
-
- def test_zookeeper_reading_data(self):
- zookeeper_uri = get_zookeeper_uri()
- zk = KazooClient(hosts=zookeeper_uri, read_only=True)
- zk.start()
-
- zk.ensure_path("/reading/data")
- zk.create("/reading/data/node", b"a value")
-
- data, stat = zk.get("/reading/data")
- self.assertEqual(data.decode("utf-8"), "")
-
- children = zk.get_children("/reading/data")
- self.assertEqual(len(children), 1)
- self.assertEqual("node", children[0])
-
- data, stat = zk.get("/reading/data/node")
- self.assertEqual(data.decode("utf-8"), "a value")
- zk.stop()
-
- def test_zookeeper_updating_data(self):
- zookeeper_uri = get_zookeeper_uri()
- zk = KazooClient(hosts=zookeeper_uri, read_only=True)
- zk.start()
-
- zk.ensure_path("/updating/data")
- zk.create("/updating/data/node", b"a value")
-
- data, stat = zk.get("/updating/data/node")
- self.assertEqual(data.decode("utf-8"), "a value")
-
- zk.set("/updating/data/node", b"b value")
- data, stat = zk.get("/updating/data/node")
- self.assertEqual(data.decode("utf-8"), "b value")
- zk.stop()
-
- def test_zookeeper_deleting_data(self):
- zookeeper_uri = get_zookeeper_uri()
- zk = KazooClient(hosts=zookeeper_uri, read_only=True)
- zk.start()
-
- zk.ensure_path("/deleting/data")
- zk.create("/deleting/data/node", b"a value")
-
- zk.delete("/deleting/data/node", recursive=True)
-
- self.assertFalse(zk.exists("/deleting/data/node"))
- self.assertTrue(zk.exists("/deleting/data"))
- data, stat = zk.get("/deleting/data")
- self.assertEqual(stat.numChildren, 0)
- zk.delete("/deleting", recursive=True)
- self.assertFalse(zk.exists("/deleting"))
- zk.stop()
diff --git a/installers/charm/zookeeper-k8s/tests/bundles/zookeeper-ha.yaml b/installers/charm/zookeeper-k8s/tests/bundles/zookeeper-ha.yaml
deleted file mode 100644
index 9c893b41..00000000
--- a/installers/charm/zookeeper-k8s/tests/bundles/zookeeper-ha.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-bundle: kubernetes
-applications:
- zookeeper-k8s:
- charm: '../../release/'
- scale: 2
- options:
- zookeeper-units: 2
- series: kubernetes
- storage:
- database: 50M
diff --git a/installers/charm/zookeeper-k8s/tests/bundles/zookeeper.yaml b/installers/charm/zookeeper-k8s/tests/bundles/zookeeper.yaml
deleted file mode 100644
index 133606b6..00000000
--- a/installers/charm/zookeeper-k8s/tests/bundles/zookeeper.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-bundle: kubernetes
-applications:
- zookeeper-k8s:
- charm: '../../release/'
- scale: 1
- options:
- zookeeper-units: 1
- series: kubernetes
- storage:
- database: 50M
diff --git a/installers/charm/zookeeper-k8s/tests/tests.yaml b/installers/charm/zookeeper-k8s/tests/tests.yaml
deleted file mode 100644
index 50a0b097..00000000
--- a/installers/charm/zookeeper-k8s/tests/tests.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-gate_bundles:
- - zookeeper
- - zookeeper-ha
-smoke_bundles:
- - zookeeper
-tests:
- - tests.basic_deployment.BasicDeployment
diff --git a/installers/charm/zookeeper-k8s/tox.ini b/installers/charm/zookeeper-k8s/tox.ini
deleted file mode 100644
index 76605198..00000000
--- a/installers/charm/zookeeper-k8s/tox.ini
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-[tox]
-envlist = pep8
-skipsdist = True
-
-[testenv]
-setenv = VIRTUAL_ENV={envdir}
- PYTHONHASHSEED=0
-whitelist_externals = juju
-passenv = HOME TERM CS_API_* OS_* AMULET_*
-deps = -r{toxinidir}/test-requirements.txt
-install_command =
- pip install {opts} {packages}
-
-[testenv:build]
-basepython = python3
-passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
-setenv = CHARM_LAYERS_DIR = /tmp
-whitelist_externals = git
- charm
- rm
- mv
-commands =
- rm -rf /tmp/canonical-osm /tmp/osm-common
- rm -rf release
- git clone https://git.launchpad.net/charm-osm-common /tmp/osm-common
- charm build . --build-dir /tmp
- mv /tmp/zookeeper-k8s/ release/
-
-[testenv:black]
-basepython = python3
-deps =
- black
- yamllint
- flake8
-commands =
- black --check --diff .
- yamllint .
- flake8 reactive/ --max-line-length=88
- flake8 tests/ --max-line-length=88
-
-[testenv:pep8]
-basepython = python3
-deps=charm-tools
-commands = charm-proof
-
-[testenv:func-noop]
-basepython = python3
-commands =
- true
-
-[testenv:func]
-basepython = python3
-commands = functest-run-suite
-
-[testenv:func-smoke]
-basepython = python3
-commands = functest-run-suite --keep-model --smoke
-
-[testenv:venv]
-commands = {posargs}
diff --git a/installers/charmed_install.sh b/installers/charmed_install.sh
deleted file mode 100755
index 21f522df..00000000
--- a/installers/charmed_install.sh
+++ /dev/null
@@ -1,594 +0,0 @@
-#! /bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# set -eux
-
-LXD_VERSION=5.0
-JUJU_VERSION=2.9
-JUJU_AGENT_VERSION=2.9.43
-K8S_CLOUD_NAME="k8s-cloud"
-KUBECTL="microk8s.kubectl"
-MICROK8S_VERSION=1.26
-OSMCLIENT_VERSION=latest
-IMAGES_OVERLAY_FILE=~/.osm/images-overlay.yaml
-PASSWORD_OVERLAY_FILE=~/.osm/password-overlay.yaml
-PATH=/snap/bin:${PATH}
-OSM_DEVOPS="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. &> /dev/null && pwd )"
-INSTALL_PLA=""
-PLA_OVERLAY_FILE=~/.osm/pla-overlay.yaml
-
-if [ -f ${OSM_DEVOPS}/common/all_funcs ] ; then
- source ${OSM_DEVOPS}/common/all_funcs
-else
- function track(){
- true
- }
- function FATAL_TRACK(){
- exit 1
- }
-fi
-
-MODEL_NAME=osm
-
-OSM_BUNDLE=ch:osm
-OSM_HA_BUNDLE=ch:osm-ha
-CHARMHUB_CHANNEL=latest/beta
-unset TAG
-
-function check_arguments(){
- while [ $# -gt 0 ] ; do
- case $1 in
- --bundle) BUNDLE="$2" ;;
- --overlay) OVERLAY="$2" ;;
- --k8s) KUBECFG="$2" ;;
- --vca) CONTROLLER="$2" ;;
- --small-profile) INSTALL_NOLXD=y;;
- --lxd) LXD_CLOUD="$2" ;;
- --lxd-cred) LXD_CREDENTIALS="$2" ;;
- --microstack) MICROSTACK=y ;;
- --ha) BUNDLE=$OSM_HA_BUNDLE ;;
- --tag) TAG="$2" ;;
- --registry) REGISTRY_INFO="$2" ;;
- --only-vca) ONLY_VCA=y ;;
- --pla) INSTALL_PLA=y ;;
- esac
- shift
- done
-
- # echo $BUNDLE $KUBECONFIG $LXDENDPOINT
-}
-
-function install_snaps(){
- if [ ! -v KUBECFG ]; then
- KUBEGRP="microk8s"
- sudo snap install microk8s --classic --channel=${MICROK8S_VERSION}/stable ||
- FATAL_TRACK k8scluster "snap install microk8s ${MICROK8S_VERSION}/stable failed"
- sudo usermod -a -G microk8s `whoami`
- # Workaround bug in calico MTU detection
- if [ ${DEFAULT_IF_MTU} -ne 1500 ] ; then
- sudo mkdir -p /var/lib/calico
- sudo ln -sf /var/snap/microk8s/current/var/lib/calico/mtu /var/lib/calico/mtu
- fi
- sudo cat /var/snap/microk8s/current/args/kube-apiserver | grep advertise-address || (
- echo "--advertise-address $DEFAULT_IP" | sudo tee -a /var/snap/microk8s/current/args/kube-apiserver
- sg ${KUBEGRP} -c microk8s.stop
- sg ${KUBEGRP} -c microk8s.start
- )
- mkdir -p ~/.kube
- sudo chown -f -R `whoami` ~/.kube
- sg ${KUBEGRP} -c "microk8s status --wait-ready"
- KUBECONFIG=~/.osm/microk8s-config.yaml
- sg ${KUBEGRP} -c "microk8s config" | tee ${KUBECONFIG}
- track k8scluster k8scluster_ok
- else
- KUBECTL="kubectl"
- sudo snap install kubectl --classic
- export KUBECONFIG=${KUBECFG}
- KUBEGRP=$(id -g -n)
- fi
- sudo snap install juju --classic --channel=$JUJU_VERSION/stable ||
- FATAL_TRACK juju "snap install juju ${JUJU_VERSION}/stable failed"
- track juju juju_ok
-}
-
-function bootstrap_k8s_lxd(){
- [ -v CONTROLLER ] && ADD_K8S_OPTS="--controller ${CONTROLLER}" && CONTROLLER_NAME=$CONTROLLER
- [ ! -v CONTROLLER ] && ADD_K8S_OPTS="--client" && BOOTSTRAP_NEEDED="yes" && CONTROLLER_NAME="osm-vca"
-
- if [ -v BOOTSTRAP_NEEDED ]; then
- CONTROLLER_PRESENT=$(juju controllers 2>/dev/null| grep ${CONTROLLER_NAME} | wc -l)
- if [ $CONTROLLER_PRESENT -ge 1 ]; then
- cat << EOF
-There is already a VCA present with the installer-reserved name of "${CONTROLLER_NAME}".
-You may either explicitly use this VCA with the "--vca ${CONTROLLER_NAME}" option, or remove it
-using this command:
-
- juju destroy-controller --release-storage --destroy-all-models -y ${CONTROLLER_NAME}
-
-Please retry the installation once this conflict has been resolved.
-EOF
- FATAL_TRACK bootstrap_k8s "VCA already present"
- fi
- else
- CONTROLLER_PRESENT=$(juju controllers 2>/dev/null| grep ${CONTROLLER_NAME} | wc -l)
- if [ $CONTROLLER_PRESENT -le 0 ]; then
- cat << EOF
-There is no VCA present with the name "${CONTROLLER_NAME}". Please specify a VCA
-that exists, or remove the --vca ${CONTROLLER_NAME} option.
-
-Please retry the installation once one of these solutions has been applied.
-EOF
- FATAL_TRACK bootstrap_k8s "Requested VCA not present"
- fi
- fi
-
- if [ -v KUBECFG ]; then
- cat $KUBECFG | juju add-k8s $K8S_CLOUD_NAME $ADD_K8S_OPTS
- [ -v BOOTSTRAP_NEEDED ] && juju bootstrap $K8S_CLOUD_NAME $CONTROLLER_NAME \
- --config controller-service-type=loadbalancer \
- --agent-version=$JUJU_AGENT_VERSION
- else
- sg ${KUBEGRP} -c "echo ${DEFAULT_IP}-${DEFAULT_IP} | microk8s.enable metallb"
- sg ${KUBEGRP} -c "microk8s.enable ingress"
- sg ${KUBEGRP} -c "microk8s.enable hostpath-storage dns"
- TIME_TO_WAIT=30
- start_time="$(date -u +%s)"
- while true
- do
- now="$(date -u +%s)"
- if [[ $(( now - start_time )) -gt $TIME_TO_WAIT ]];then
- echo "Microk8s storage failed to enable"
- sg ${KUBEGRP} -c "microk8s.status"
- FATAL_TRACK bootstrap_k8s "Microk8s storage failed to enable"
- fi
- storage_status=`sg ${KUBEGRP} -c "microk8s.status -a storage"`
- if [[ $storage_status == "enabled" ]]; then
- break
- fi
- sleep 1
- done
-
- [ ! -v BOOTSTRAP_NEEDED ] && sg ${KUBEGRP} -c "microk8s.config" | juju add-k8s $K8S_CLOUD_NAME $ADD_K8S_OPTS
- [ -v BOOTSTRAP_NEEDED ] && sg ${KUBEGRP} -c \
- "juju bootstrap microk8s $CONTROLLER_NAME --config controller-service-type=loadbalancer --agent-version=$JUJU_AGENT_VERSION" \
- && K8S_CLOUD_NAME=microk8s
- fi
- track bootstrap_k8s bootstrap_k8s_ok
-
- if [ ! -v INSTALL_NOLXD ]; then
- if [ -v LXD_CLOUD ]; then
- if [ ! -v LXD_CREDENTIALS ]; then
- echo "The installer needs the LXD server certificate if the LXD is external"
- FATAL_TRACK bootstrap_lxd "No LXD certificate supplied"
- fi
- else
- LXDENDPOINT=$DEFAULT_IP
- LXD_CLOUD=~/.osm/lxd-cloud.yaml
- LXD_CREDENTIALS=~/.osm/lxd-credentials.yaml
- # Apply sysctl production values for optimal performance
- sudo cp /usr/share/osm-devops/installers/lxd/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
- sudo sysctl --system
- # Install LXD snap
- sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
- snap info lxd | grep installed > /dev/null
- if [ $? -eq 0 ]; then
- sudo snap refresh lxd --channel $LXD_VERSION/stable
- else
- sudo snap install lxd --channel $LXD_VERSION/stable
- fi
- # Configure LXD
- sudo usermod -a -G lxd `whoami`
- cat /usr/share/osm-devops/installers/lxd/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$LXDENDPOINT':8443/' | sg lxd -c "lxd init --preseed"
- sg lxd -c "lxd waitready"
-
- cat << EOF > $LXD_CLOUD
-clouds:
- lxd-cloud:
- type: lxd
- auth-types: [certificate]
- endpoint: "https://$LXDENDPOINT:8443"
- config:
- ssl-hostname-verification: false
-EOF
- openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
- cat << EOF > $LXD_CREDENTIALS
-credentials:
- lxd-cloud:
- lxd-cloud:
- auth-type: certificate
- server-cert: /var/snap/lxd/common/lxd/server.crt
- client-cert: ~/.osm/client.crt
- client-key: ~/.osm/client.key
-EOF
- lxc config trust add local: ~/.osm/client.crt
- fi
-
- juju add-cloud -c $CONTROLLER_NAME lxd-cloud $LXD_CLOUD --force
- juju add-credential -c $CONTROLLER_NAME lxd-cloud -f $LXD_CREDENTIALS
- sg lxd -c "lxd waitready"
- juju controller-config features=[k8s-operators]
- track bootstrap_lxd bootstrap_lxd_ok
- fi
-}
-
-function deploy_charmed_osm(){
- if [ -v REGISTRY_INFO ] ; then
- registry_parts=(${REGISTRY_INFO//@/ })
- if [ ${#registry_parts[@]} -eq 1 ] ; then
- # No credentials supplied
- REGISTRY_USERNAME=""
- REGISTRY_PASSWORD=""
- REGISTRY_URL=${registry_parts[0]}
- else
- credentials=${registry_parts[0]}
- credential_parts=(${credentials//:/ })
- REGISTRY_USERNAME=${credential_parts[0]}
- REGISTRY_PASSWORD=${credential_parts[1]}
- REGISTRY_URL=${registry_parts[1]}
- fi
- # Ensure the URL ends with a /
- case $REGISTRY_URL in
- */) ;;
- *) REGISTRY_URL=${REGISTRY_URL}/
- esac
- fi
-
- echo "Creating OSM model"
- if [ -v KUBECFG ]; then
- juju add-model $MODEL_NAME $K8S_CLOUD_NAME
- else
- sg ${KUBEGRP} -c "juju add-model $MODEL_NAME $K8S_CLOUD_NAME"
- fi
- echo "Deploying OSM with charms"
- images_overlay=""
- if [ -v REGISTRY_URL ]; then
- [ ! -v TAG ] && TAG='latest'
- fi
- [ -v TAG ] && generate_images_overlay && images_overlay="--overlay $IMAGES_OVERLAY_FILE"
-
- if [ -v OVERLAY ]; then
- extra_overlay="--overlay $OVERLAY"
- fi
- echo "Creating Password Overlay"
-
- generate_password_overlay && secret_overlay="--overlay $PASSWORD_OVERLAY_FILE"
-
- [ -n "$INSTALL_PLA" ] && create_pla_overlay && pla_overlay="--overlay $PLA_OVERLAY_FILE"
-
- if [ -v BUNDLE ]; then
- juju deploy --trust --channel $CHARMHUB_CHANNEL -m $MODEL_NAME $BUNDLE $images_overlay $extra_overlay $secret_overlay $pla_overlay
- else
- juju deploy --trust --channel $CHARMHUB_CHANNEL -m $MODEL_NAME $OSM_BUNDLE $images_overlay $extra_overlay $secret_overlay $pla_overlay
- fi
-
- if [ ! -v KUBECFG ]; then
- API_SERVER=${DEFAULT_IP}
- else
- API_SERVER=$(kubectl config view --minify | grep server | cut -f 2- -d ":" | tr -d " ")
- proto="$(echo $API_SERVER | grep :// | sed -e's,^\(.*://\).*,\1,g')"
- url="$(echo ${API_SERVER/$proto/})"
- user="$(echo $url | grep @ | cut -d@ -f1)"
- hostport="$(echo ${url/$user@/} | cut -d/ -f1)"
- API_SERVER="$(echo $hostport | sed -e 's,:.*,,g')"
- fi
- # Configure VCA Integrator
- if [ ! -v INSTALL_NOLXD ]; then
- juju config vca \
- k8s-cloud=microk8s \
- lxd-cloud=lxd-cloud:lxd-cloud \
- controllers="`cat ~/.local/share/juju/controllers.yaml`" \
- accounts="`cat ~/.local/share/juju/accounts.yaml`" \
- public-key="`cat ~/.local/share/juju/ssh/juju_id_rsa.pub`"
- else
- juju config vca \
- k8s-cloud=microk8s \
- controllers="`cat ~/.local/share/juju/controllers.yaml`" \
- accounts="`cat ~/.local/share/juju/accounts.yaml`" \
- public-key="`cat ~/.local/share/juju/ssh/juju_id_rsa.pub`"
- fi
- # Expose OSM services
- juju config -m $MODEL_NAME nbi external-hostname=nbi.${API_SERVER}.nip.io
- juju config -m $MODEL_NAME ng-ui external-hostname=ui.${API_SERVER}.nip.io
- juju config -m $MODEL_NAME grafana site_url=https://grafana.${API_SERVER}.nip.io
- juju config -m $MODEL_NAME prometheus site_url=https://prometheus.${API_SERVER}.nip.io
-
- echo "Waiting for deployment to finish..."
- check_osm_deployed
- grafana_leader=`juju status -m $MODEL_NAME grafana | grep "*" | cut -d "*" -f 1`
- grafana_admin_password=`juju run -m $MODEL_NAME --unit $grafana_leader "echo \\$GF_SECURITY_ADMIN_PASSWORD"`
- juju config -m $MODEL_NAME mon grafana-password=$grafana_admin_password
- check_osm_deployed
- echo "OSM with charms deployed"
-}
-
-function check_osm_deployed() {
- TIME_TO_WAIT=600
- start_time="$(date -u +%s)"
- total_service_count=15
- [ -n "$INSTALL_PLA" ] && total_service_count=$((total_service_count + 1))
- previous_count=0
- while true
- do
- service_count=$(juju status --format json -m $MODEL_NAME | jq '.applications[]."application-status".current' | grep active | wc -l)
- echo "$service_count / $total_service_count services active"
- if [ $service_count -eq $total_service_count ]; then
- break
- fi
- if [ $service_count -ne $previous_count ]; then
- previous_count=$service_count
- start_time="$(date -u +%s)"
- fi
- now="$(date -u +%s)"
- if [[ $(( now - start_time )) -gt $TIME_TO_WAIT ]];then
- echo "Timed out waiting for OSM services to become ready"
- FATAL_TRACK deploy_osm "Timed out waiting for services to become ready"
- fi
- sleep 10
- done
-}
-
-function generate_password_overlay() {
- # prometheus
- web_config_password=`openssl rand -hex 16`
- # keystone
- keystone_db_password=`openssl rand -hex 16`
- keystone_admin_password=`openssl rand -hex 16`
- keystone_service_password=`openssl rand -hex 16`
- # mariadb
- mariadb_password=`openssl rand -hex 16`
- mariadb_root_password=`openssl rand -hex 16`
- cat << EOF > /tmp/password-overlay.yaml
-applications:
- prometheus:
- options:
- web_config_password: $web_config_password
- keystone:
- options:
- keystone-db-password: $keystone_db_password
- admin-password: $keystone_admin_password
- service-password: $keystone_service_password
- mariadb:
- options:
- password: $mariadb_password
- root_password: $mariadb_root_password
-EOF
- mv /tmp/password-overlay.yaml $PASSWORD_OVERLAY_FILE
-}
-
-function create_pla_overlay(){
- echo "Creating PLA Overlay"
- [ $BUNDLE == $OSM_HA_BUNDLE ] && scale=3 || scale=1
- cat << EOF > /tmp/pla-overlay.yaml
-applications:
- pla:
- charm: osm-pla
- channel: latest/stable
- scale: $scale
- series: kubernetes
- options:
- log_level: DEBUG
- resources:
- image: opensourcemano/pla:testing-daily
-relations:
- - - pla:kafka
- - kafka:kafka
- - - pla:mongodb
- - mongodb:database
-EOF
- mv /tmp/pla-overlay.yaml $PLA_OVERLAY_FILE
-}
-
-function generate_images_overlay(){
- echo "applications:" > /tmp/images-overlay.yaml
-
- charms_with_resources="nbi lcm mon pol ng-ui ro"
- [ -n "$INSTALL_PLA" ] && charms_with_resources+=" pla"
- for charm in $charms_with_resources; do
- cat << EOF > /tmp/${charm}_registry.yaml
-registrypath: ${REGISTRY_URL}opensourcemano/${charm}:$TAG
-EOF
- if [ ! -z "$REGISTRY_USERNAME" ] ; then
- echo username: $REGISTRY_USERNAME >> /tmp/${charm}_registry.yaml
- echo password: $REGISTRY_PASSWORD >> /tmp/${charm}_registry.yaml
- fi
-
- cat << EOF >> /tmp/images-overlay.yaml
- ${charm}:
- resources:
- ${charm}-image: /tmp/${charm}_registry.yaml
-
-EOF
- done
- ch_charms_with_resources="keystone"
- for charm in $ch_charms_with_resources; do
- cat << EOF > /tmp/${charm}_registry.yaml
-registrypath: ${REGISTRY_URL}opensourcemano/${charm}:$TAG
-EOF
- if [ ! -z "$REGISTRY_USERNAME" ] ; then
- echo username: $REGISTRY_USERNAME >> /tmp/${charm}_registry.yaml
- echo password: $REGISTRY_PASSWORD >> /tmp/${charm}_registry.yaml
- fi
-
- cat << EOF >> /tmp/images-overlay.yaml
- ${charm}:
- resources:
- ${charm}-image: /tmp/${charm}_registry.yaml
-
-EOF
- done
-
- mv /tmp/images-overlay.yaml $IMAGES_OVERLAY_FILE
-}
-
-function refresh_osmclient_snap() {
- osmclient_snap_install_refresh refresh
-}
-
-function install_osm_client_snap() {
- osmclient_snap_install_refresh install
-}
-
-function osmclient_snap_install_refresh() {
- channel_preference="stable candidate beta edge"
- for channel in $channel_preference; do
- echo "Trying to install osmclient from channel $OSMCLIENT_VERSION/$channel"
- sudo snap $1 osmclient --channel $OSMCLIENT_VERSION/$channel 2> /dev/null && echo osmclient snap installed && break
- done
-}
-function install_osmclient() {
- snap info osmclient | grep -E ^installed: && refresh_osmclient_snap || install_osm_client_snap
-}
-
-function add_local_k8scluster() {
- osm --all-projects vim-create \
- --name _system-osm-vim \
- --account_type dummy \
- --auth_url http://dummy \
- --user osm --password osm --tenant osm \
- --description "dummy" \
- --config '{management_network_name: mgmt}'
- tmpfile=$(mktemp --tmpdir=${HOME})
- cp ${KUBECONFIG} ${tmpfile}
- osm --all-projects k8scluster-add \
- --creds ${tmpfile} \
- --vim _system-osm-vim \
- --k8s-nets '{"net1": null}' \
- --version '1.19' \
- --description "OSM Internal Cluster" \
- _system-osm-k8s
- rm -f ${tmpfile}
-}
-
-function install_microstack() {
- sudo snap install microstack --beta --devmode
-
- CHECK=$(microstack.openstack server list)
- if [ $? -ne 0 ] ; then
- if [[ $CHECK == *"not initialized"* ]]; then
- echo "Setting MicroStack dashboard to listen to port 8080"
- sudo snap set microstack config.network.ports.dashboard=8080
- echo "Initializing MicroStack. This can take several minutes"
- sudo microstack.init --auto --control
- fi
- fi
-
- sudo snap alias microstack.openstack openstack
-
- echo "Updating default security group in MicroStack to allow all access"
-
- for i in $(microstack.openstack security group list | awk '/default/{ print $2 }'); do
- for PROTO in icmp tcp udp ; do
- echo " $PROTO ingress"
- CHECK=$(microstack.openstack security group rule create $i --protocol $PROTO --remote-ip 0.0.0.0/0 2>&1)
- if [ $? -ne 0 ] ; then
- if [[ $CHECK != *"409"* ]]; then
- echo "Error creating ingress rule for $PROTO"
- echo $CHECK
- fi
- fi
- done
- done
-
- microstack.openstack network show osm-ext &>/dev/null
- if [ $? -ne 0 ]; then
- echo "Creating osm-ext network with router to bridge to MicroStack external network"
- microstack.openstack network create --enable --no-share osm-ext
- microstack.openstack subnet create osm-ext-subnet --network osm-ext --dns-nameserver 8.8.8.8 \
- --subnet-range 172.30.0.0/24
- microstack.openstack router create external-router
- microstack.openstack router add subnet external-router osm-ext-subnet
- microstack.openstack router set --external-gateway external external-router
- fi
-
- microstack.openstack image list | grep ubuntu20.04 &> /dev/null
- if [ $? -ne 0 ] ; then
- echo "Fetching Ubuntu 20.04 image and upLoading to MicroStack"
- wget -q -O- https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img \
- | microstack.openstack image create --public --container-format=bare \
- --disk-format=qcow2 ubuntu20.04 | grep status
- fi
-
- if [ ! -f ~/.ssh/microstack ]; then
- ssh-keygen -t rsa -N "" -f ~/.ssh/microstack
- microstack.openstack keypair create --public-key ~/.ssh/microstack.pub microstack
- fi
-
- echo "Creating VIM microstack-site in OSM"
- . /var/snap/microstack/common/etc/microstack.rc
-
- osm vim-create \
- --name microstack-site \
- --user "$OS_USERNAME" \
- --password "$OS_PASSWORD" \
- --auth_url "$OS_AUTH_URL" \
- --tenant "$OS_USERNAME" \
- --account_type openstack \
- --config='{use_floating_ip: True,
- insecure: True,
- keypair: microstack,
- management_network_name: osm-ext}'
-}
-
-DEFAULT_IF=`ip route list match 0.0.0.0 | awk '{print $5; exit}'`
-DEFAULT_IP=`ip -o -4 a |grep ${DEFAULT_IF}|awk '{split($4,a,"/"); print a[1]; exit}'`
-DEFAULT_IF_MTU=`ip a show ${DEFAULT_IF} | grep mtu | awk '{print $5}'`
-
-check_arguments $@
-mkdir -p ~/.osm
-install_snaps
-bootstrap_k8s_lxd
-if [ -v ONLY_VCA ]; then
- HOME=/home/$USER
- k8scloud=microk8s
- lxdcloud=lxd-cloud:lxd-cloud
- controllers="`cat $HOME/.local/share/juju/controllers.yaml`"
- accounts="`cat $HOME/.local/share/juju/accounts.yaml`"
- publickey="`cat $HOME/.local/share/juju/ssh/juju_id_rsa.pub`"
- echo "Use the following command to register the installed VCA to your OSM VCA integrator charm"
- echo -e " juju config vca \\\n k8s-cloud=$k8scloud \\\n lxd-cloud=$lxdcloud \\\n controllers=$controllers \\\n accounts=$accounts \\\n public-key=$publickey"
- track deploy_osm deploy_vca_only_ok
-else
- deploy_charmed_osm
- track deploy_osm deploy_osm_services_k8s_ok
- install_osmclient
- track osmclient osmclient_ok
- export OSM_HOSTNAME=$(juju config -m $MODEL_NAME nbi external-hostname):443
- export OSM_PASSWORD=$keystone_admin_password
- sleep 10
- add_local_k8scluster
- track final_ops add_local_k8scluster_ok
- if [ -v MICROSTACK ]; then
- install_microstack
- track final_ops install_microstack_ok
- fi
-
- echo "Your installation is now complete, follow these steps for configuring the osmclient:"
- echo
- echo "1. Create the OSM_HOSTNAME environment variable with the NBI IP"
- echo
- echo "export OSM_HOSTNAME=$OSM_HOSTNAME"
- echo "export OSM_PASSWORD=$OSM_PASSWORD"
- echo
- echo "2. Add the previous commands to your .bashrc for other Shell sessions"
- echo
- echo "echo \"export OSM_HOSTNAME=$OSM_HOSTNAME\" >> ~/.bashrc"
- echo "echo \"export OSM_PASSWORD=$OSM_PASSWORD\" >> ~/.bashrc"
- echo
- echo "3. Login OSM GUI by using admin password: $OSM_PASSWORD"
- echo
- echo "DONE"
- track end
-fi
-
diff --git a/installers/charmed_uninstall.sh b/installers/charmed_uninstall.sh
deleted file mode 100755
index 386cb049..00000000
--- a/installers/charmed_uninstall.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#! /bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-
-juju destroy-model osm --destroy-storage -y
-sudo snap unalias osm
-sudo snap remove osmclient
-CONTROLLER_NAME="osm-vca"
-CONTROLLER_PRESENT=$(juju controllers 2>/dev/null| grep ${CONTROLLER_NAME} | wc -l)
-if [[ $CONTROLLER_PRESENT -ge 1 ]]; then
- cat << EOF
-The VCA with the name "${CONTROLLER_NAME}" has been left in place to ensure that no other
-applications are using it. If you are sure you wish to remove this controller,
-please execute the following command:
-
- juju destroy-controller --release-storage --destroy-all-models -y ${CONTROLLER_NAME}
-
-EOF
-fi
diff --git a/installers/full_install_osm.sh b/installers/full_install_osm.sh
index f0723eac..522d2283 100755
--- a/installers/full_install_osm.sh
+++ b/installers/full_install_osm.sh
@@ -21,7 +21,7 @@ function usage(){
echo -e " -h / --help: print this help"
echo -e " -y: do not prompt for confirmation, assumes yes"
echo -e " -r : use specified repository name for osm packages"
- echo -e " -R : use specified release for osm binaries (deb packages, lxd images, ...)"
+ echo -e " -R : use specified release for osm binaries (deb packages, ...)"
echo -e " -u : use specified repository url for osm packages"
echo -e " -k : use specified repository public key url"
echo -e " -a : use this apt proxy url when downloading apt packages (air-gapped installation)"
@@ -33,92 +33,20 @@ function usage(){
echo -e " --no-aux-cluster: Do not provision an auxiliary cluster for cloud-native gitops operations in OSM (NEW in Release SIXTEEN) (by default, it is installed)"
echo -e " -D : use local devops installation path"
echo -e " -s namespace when installed using k8s, default is osm"
- echo -e " -H use specific juju host controller IP"
- echo -e " -S use VCA/juju secret key"
- echo -e " -P use VCA/juju public key file"
- echo -e " -A use VCA/juju API proxy"
echo -e " -w : Location to store runtime installation"
- echo -e " -l: LXD cloud yaml file"
- echo -e " -L: LXD credentials yaml file"
echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
echo -e " -d use docker registry URL instead of dockerhub"
echo -e " -p set docker proxy URL as part of docker CE configuration"
echo -e " -T specify docker tag for the modules specified with option -m"
echo -e " --debug: debug mode"
- echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
- echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
- echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
- echo -e " --nojuju: do not juju, assumes already installed"
- echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
echo -e " --nohostclient: do not install the osmclient"
echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
echo -e " --showopts: print chosen options and exit (only for debugging)"
- echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
- echo -e " [--bundle ]: Specify with which bundle to deploy OSM with charms (--charmed option)"
- echo -e " [--k8s ]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
- echo -e " [--vca ]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
- echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)"
- echo -e " [--lxd ]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
- echo -e " [--lxd-cred ]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
- echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
- echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
- echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
- echo -e " [--tag]: Docker image tag. (--charmed option)"
- echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
[ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
-# takes a juju/accounts.yaml file and returns the password specific
-# for a controller. I wrote this using only bash tools to minimize
-# additions of other packages
-function parse_juju_password {
- [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
- password_file="${HOME}/.local/share/juju/accounts.yaml"
- local controller_name=$1
- local s='[[:space:]]*' w='[a-zA-Z0-9_-]*' fs=$(echo @|tr @ '\034')
- sed -ne "s|^\($s\):|\1|" \
- -e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
- -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $password_file |
- awk -F$fs -v controller=$controller_name '{
- indent = length($1)/2;
- vname[indent] = $2;
- for (i in vname) {if (i > indent) {delete vname[i]}}
- if (length($3) > 0) {
- vn=""; for (i=0; i ${OSM_HELM_WORK_DIR}/osm-values.yaml
-vca:
- pubkey: \"${OSM_VCA_PUBKEY}\"
-EOF"
- fi
# Generate helm values to be passed with --set
OSM_HELM_OPTS=""
@@ -228,18 +150,6 @@ EOF"
OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.gitops.pubkey=${AGE_MGMT_PUBKEY}"
fi
- if [ -n "${INSTALL_JUJU}" ]; then
- OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.enabled=true"
- OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.host=${OSM_VCA_HOST}"
- OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.secret=${OSM_VCA_SECRET}"
- OSM_HELM_OPTS="${OSM_HELM_OPTS} --set vca.cacert=${OSM_VCA_CACERT}"
- fi
- [ -n "$OSM_VCA_APIPROXY" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set lcm.config.OSMLCM_VCA_APIPROXY=${OSM_VCA_APIPROXY}"
-
- OSM_HELM_OPTS="${OSM_HELM_OPTS} --set airflow.defaultAirflowRepository=${DOCKER_REGISTRY_URL}${DOCKER_USER}/airflow"
- [ ! "$OSM_DOCKER_TAG" == "testing-daily" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set-string airflow.defaultAirflowTag=${OSM_DOCKER_TAG}"
- OSM_HELM_OPTS="${OSM_HELM_OPTS} --set airflow.ingress.web.hosts[0].name=airflow.${OSM_K8S_EXTERNAL_IP}.nip.io"
-
if [ -n "${OSM_BEHIND_PROXY}" ]; then
OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.behindHttpProxy=true"
[ -n "${HTTP_PROXY}" ] && OSM_HELM_OPTS="${OSM_HELM_OPTS} --set global.httpProxy.HTTP_PROXY=\"${HTTP_PROXY}\""
@@ -255,9 +165,6 @@ EOF"
fi
fi
- if [ -n "${INSTALL_JUJU}" ]; then
- OSM_HELM_OPTS="-f ${OSM_HELM_WORK_DIR}/osm-values.yaml ${OSM_HELM_OPTS}"
- fi
echo "helm upgrade --install -n $OSM_NAMESPACE --create-namespace $OSM_NAMESPACE $OSM_DEVOPS/installers/helm/osm ${OSM_HELM_OPTS}"
helm upgrade --install -n $OSM_NAMESPACE --create-namespace $OSM_NAMESPACE $OSM_DEVOPS/installers/helm/osm ${OSM_HELM_OPTS}
# Override existing values.yaml with the final values.yaml used to install OSM
@@ -307,11 +214,10 @@ function ask_proceed() {
[ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
[ -z "$ASSUME_YES" ] && ! ask_user "The installation will do the following
- 1. Install and configure LXD
- 2. Install juju
- 3. Install docker CE
- 4. Disable swap space
- 5. Install and initialize Kubernetes
+ 1. Install required packages
+ 2. Install Docker CE
+ 3. Disable swap space
+ 4. Install and initialize Kubernetes
as pre-requirements.
Do you want to proceed (Y/n)? " y && echo "Cancelled!" && exit 1
@@ -339,19 +245,19 @@ The following env variables have been found for the current user:
${OSM_PROXY_ENV_VARIABLES}.
This suggests that this machine is behind a proxy and a special configuration is required.
-The installer will install Docker CE, LXD and Juju to work behind a proxy using those
+The installer will install Docker CE and Kubernetes to work behind a proxy using those
env variables.
-Take into account that the installer uses apt, curl, wget, docker, lxd, juju and snap.
+Take into account that the installer uses apt, curl, wget and docker.
Depending on the program, the env variables to work behind a proxy might be different
(e.g. http_proxy vs HTTP_PROXY).
For that reason, it is strongly recommended that at least http_proxy, https_proxy, HTTP_PROXY
and HTTPS_PROXY are defined.
-Finally, some of the programs (apt, snap) those programs are run as sudoer, requiring that
-those env variables are also set for root user. If you are not sure whether those variables
-are configured for the root user, you can stop the installation now.
+Finally, some of the programs (apt) are run with sudo, requiring that those env variables
+are also set for the root user. If you are not sure whether those variables are configured
+for the root user, you can stop the installation now.
Do you want to proceed with the installation (Y/n)? " y && echo "Cancelled!" && exit 1
else
@@ -364,27 +270,14 @@ Do you want to proceed with the installation (Y/n)? " y && echo "Cancelled!" &&
function find_devops_folder() {
[ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
if [ -z "$OSM_DEVOPS" ]; then
- if [ -n "$TEST_INSTALLER" ]; then
- echo -e "\nUsing local devops repo for OSM installation"
- OSM_DEVOPS="$(dirname $(realpath $(dirname $0)))"
- else
- echo -e "\nCreating temporary dir for OSM installation"
- OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
- trap 'rm -rf "$OSM_DEVOPS"' EXIT
- git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
- fi
+ echo -e "\nCreating temporary dir for OSM installation"
+ OSM_DEVOPS="$(mktemp -d -q --tmpdir "installosm.XXXXXX")"
+ trap 'rm -rf "$OSM_DEVOPS"' EXIT
+ git clone https://osm.etsi.org/gerrit/osm/devops.git $OSM_DEVOPS
fi
[ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
-function install_lxd() {
- [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
- LXD_INSTALL_OPTS="-D ${OSM_DEVOPS} -i ${OSM_DEFAULT_IF} ${DEBUG_INSTALL}"
- [ -n "${OSM_BEHIND_PROXY}" ] && LXD_INSTALL_OPTS="${LXD_INSTALL_OPTS} -P"
- $OSM_DEVOPS/installers/install_lxd.sh ${LXD_INSTALL_OPTS} || FATAL_TRACK lxd "install_lxd.sh failed"
- [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
-}
-
function install_docker_ce() {
[ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
DOCKER_CE_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
@@ -451,10 +344,7 @@ function install_osm() {
trap ctrl_c INT
check_osm_behind_proxy
- check_packages "git wget curl tar snapd"
- if [ -n "${INSTALL_JUJU}" ]; then
- sudo snap install jq || FATAL "Could not install jq (snap package). Make sure that snap works"
- fi
+ check_packages "git wget curl tar"
find_devops_folder
track start release $RELEASE none none docker_tag $OSM_DOCKER_TAG none none installation_type $OSM_INSTALLATION_TYPE none none os_info $os_info none none
@@ -475,9 +365,6 @@ function install_osm() {
# configure apt proxy
[ -n "$APT_PROXY_URL" ] && configure_apt_proxy $APT_PROXY_URL
- # if lxd is requested, we will install it
- [ -n "$INSTALL_LXD" ] && install_lxd
-
track prereq prereqok_ok
if [ -n "$INSTALL_DOCKER" ] || [ "${K8S_CLUSTER_ENGINE}" == "kubeadm" ]; then
@@ -499,22 +386,6 @@ function install_osm() {
kubectl create namespace ${OSM_NAMESPACE}
track k8scluster k8scluster_ok
- if [ -n "${INSTALL_JUJU}" ]; then
- echo "Installing Juju ..."
- JUJU_OPTS="-D ${OSM_DEVOPS} -s ${OSM_NAMESPACE} -i ${OSM_DEFAULT_IP} ${DEBUG_INSTALL} ${INSTALL_CACHELXDIMAGES}"
- [ -n "${OSM_VCA_HOST}" ] && JUJU_OPTS="$JUJU_OPTS -H ${OSM_VCA_HOST}"
- [ -n "${LXD_CLOUD_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -l ${LXD_CLOUD_FILE}"
- [ -n "${LXD_CRED_FILE}" ] && JUJU_OPTS="$JUJU_OPTS -L ${LXD_CRED_FILE}"
- [ -n "${CONTROLLER_NAME}" ] && JUJU_OPTS="$JUJU_OPTS -K ${CONTROLLER_NAME}"
- [ -n "${OSM_BEHIND_PROXY}" ] && JUJU_OPTS="${JUJU_OPTS} -P"
- $OSM_DEVOPS/installers/install_juju.sh ${JUJU_OPTS} || FATAL_TRACK juju "install_juju.sh failed"
- set_vca_variables
- fi
- track juju juju_ok
-
- # This track is maintained for backwards compatibility
- track docker_images docker_images_ok
-
# Install mgmt cluster
echo "Installing mgmt cluster ..."
MGMTCLUSTER_INSTALL_OPTS="-D ${OSM_DEVOPS} ${DEBUG_INSTALL}"
@@ -557,11 +428,6 @@ function install_osm() {
add_local_k8scluster
track final_ops add_local_k8scluster_ok
- # if lxd is requested, iptables firewall is updated to work with both docker and LXD
- if [ -n "$INSTALL_LXD" ]; then
- arrange_docker_default_network_policy
- fi
-
wget -q -O- https://osm-download.etsi.org/ftp/osm-16.0-sixteen/README2.txt &> /dev/null
track end
sudo find /etc/osm
@@ -569,13 +435,6 @@ function install_osm() {
return 0
}
-function arrange_docker_default_network_policy() {
- echo -e "Fixing firewall so docker and LXD can share the same host without affecting each other."
- sudo iptables -I DOCKER-USER -j ACCEPT
- sudo iptables-save | sudo tee /etc/iptables/rules.v4
- sudo ip6tables-save | sudo tee /etc/iptables/rules.v6
-}
-
function install_k8s_monitoring() {
[ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
# install OSM monitoring
@@ -591,30 +450,21 @@ function dump_vars(){
echo "DOCKER_PROXY_URL=$DOCKER_PROXY_URL"
echo "DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
echo "DOCKER_USER=$DOCKER_USER"
- echo "INSTALL_CACHELXDIMAGES=$INSTALL_CACHELXDIMAGES"
- echo "INSTALL_JUJU=$INSTALL_JUJU"
echo "INSTALL_K8S_MONITOR=$INSTALL_K8S_MONITOR"
- echo "INSTALL_LXD=$INSTALL_LXD"
echo "INSTALL_DOCKER=$INSTALL_DOCKER"
echo "OSM_DEVOPS=$OSM_DEVOPS"
echo "OSM_DOCKER_TAG=$OSM_DOCKER_TAG"
echo "OSM_K8S_EXTERNAL_IP=$OSM_K8S_EXTERNAL_IP"
echo "OSM_HELM_WORK_DIR=$OSM_HELM_WORK_DIR"
echo "OSM_NAMESPACE=$OSM_NAMESPACE"
- echo "OSM_VCA_HOST=$OSM_VCA_HOST"
- echo "OSM_VCA_PUBKEY=$OSM_VCA_PUBKEY"
- echo "OSM_VCA_SECRET=$OSM_VCA_SECRET"
echo "OSM_WORK_DIR=$OSM_WORK_DIR"
echo "PULL_IMAGES=$PULL_IMAGES"
- echo "RECONFIGURE=$RECONFIGURE"
echo "RELEASE=$RELEASE"
echo "REPOSITORY=$REPOSITORY"
echo "REPOSITORY_BASE=$REPOSITORY_BASE"
echo "REPOSITORY_KEY=$REPOSITORY_KEY"
echo "SHOWOPTS=$SHOWOPTS"
- echo "TEST_INSTALLER=$TEST_INSTALLER"
echo "UNINSTALL=$UNINSTALL"
- echo "UPDATE=$UPDATE"
[ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
@@ -634,10 +484,6 @@ function ctrl_c() {
}
UNINSTALL=""
-UPDATE=""
-RECONFIGURE=""
-TEST_INSTALLER=""
-INSTALL_LXD=""
SHOWOPTS=""
ASSUME_YES=""
APT_PROXY_URL=""
@@ -646,20 +492,11 @@ DEBUG_INSTALL=""
RELEASE="testing-daily"
REPOSITORY="testing"
INSTALL_K8S_MONITOR=""
-LXD_REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/lxd"
-LXD_REPOSITORY_PATH=""
INSTALL_DOCKER=""
-INSTALL_JUJU=""
INSTALL_NOHOSTCLIENT=""
-INSTALL_CACHELXDIMAGES=""
INSTALL_AUX_CLUSTER="y"
INSTALL_MGMT_CLUSTER="y"
OSM_DEVOPS=
-OSM_VCA_HOST=
-OSM_VCA_SECRET=
-OSM_VCA_PUBKEY=
-OSM_VCA_CLOUDNAME="localhost"
-OSM_VCA_K8S_CLOUDNAME="k8scloud"
OSM_NAMESPACE=osm
REPOSITORY_KEY="OSM%20ETSI%20Release%20Key.gpg"
REPOSITORY_BASE="https://osm-download.etsi.org/repository/osm/debian"
@@ -688,7 +525,7 @@ DOCKER_PROXY_URL=
MODULE_DOCKER_TAG=
OSM_INSTALLATION_TYPE="Default"
-while getopts ":a:c:e:r:n:k:u:R:D:o:O:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:G:M:-: hy" o; do
+while getopts ":a:c:e:r:n:k:u:R:D:o:O:N:s:t:U:l:L:K:d:p:T:f:F:G:M:-: hy" o; do
case "${o}" in
a)
APT_PROXY_URL=${OPTARG}
@@ -722,12 +559,6 @@ while getopts ":a:c:e:r:n:k:u:R:D:o:O:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:G:M:-: hy"
D)
OSM_DEVOPS="${OPTARG}"
;;
- H)
- OSM_VCA_HOST="${OPTARG}"
- ;;
- S)
- OSM_VCA_SECRET="${OPTARG}"
- ;;
s)
OSM_NAMESPACE="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
;;
@@ -738,18 +569,6 @@ while getopts ":a:c:e:r:n:k:u:R:D:o:O:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:G:M:-: hy"
U)
DOCKER_USER="${OPTARG}"
;;
- P)
- OSM_VCA_PUBKEY=$(cat ${OPTARG})
- ;;
- A)
- OSM_VCA_APIPROXY="${OPTARG}"
- ;;
- l)
- LXD_CLOUD_FILE="${OPTARG}"
- ;;
- L)
- LXD_CRED_FILE="${OPTARG}"
- ;;
K)
CONTROLLER_NAME="${OPTARG}"
;;
@@ -774,33 +593,11 @@ while getopts ":a:c:e:r:n:k:u:R:D:o:O:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:G:M:-: hy"
[ "${OPTARG}" == "uninstall" ] && UNINSTALL="y" && continue
[ "${OPTARG}" == "no-mgmt-cluster" ] && INSTALL_MGMT_CLUSTER="" && continue
[ "${OPTARG}" == "no-aux-cluster" ] && INSTALL_AUX_CLUSTER="" && continue
- [ "${OPTARG}" == "update" ] && UPDATE="y" && continue
- [ "${OPTARG}" == "reconfigure" ] && RECONFIGURE="y" && continue
- [ "${OPTARG}" == "test" ] && TEST_INSTALLER="y" && continue
- [ "${OPTARG}" == "lxdinstall" ] && INSTALL_LXD="y" && continue
- [ "${OPTARG}" == "lxd" ] && INSTALL_LXD="y" && continue
- [ "${OPTARG}" == "nolxd" ] && INSTALL_LXD="" && continue
[ "${OPTARG}" == "docker" ] && INSTALL_DOCKER="y" && continue
[ "${OPTARG}" == "nodocker" ] && INSTALL_DOCKER="" && continue
[ "${OPTARG}" == "showopts" ] && SHOWOPTS="y" && continue
- [ "${OPTARG}" == "juju" ] && INSTALL_JUJU="y" && continue
- [ "${OPTARG}" == "nojuju" ] && INSTALL_JUJU="" && continue
[ "${OPTARG}" == "nohostclient" ] && INSTALL_NOHOSTCLIENT="y" && continue
[ "${OPTARG}" == "k8s_monitor" ] && INSTALL_K8S_MONITOR="y" && continue
- [ "${OPTARG}" == "charmed" ] && CHARMED="y" && OSM_INSTALLATION_TYPE="Charmed" && continue
- [ "${OPTARG}" == "bundle" ] && continue
- [ "${OPTARG}" == "k8s" ] && continue
- [ "${OPTARG}" == "lxd-cred" ] && continue
- [ "${OPTARG}" == "microstack" ] && continue
- [ "${OPTARG}" == "overlay" ] && continue
- [ "${OPTARG}" == "only-vca" ] && continue
- [ "${OPTARG}" == "small-profile" ] && continue
- [ "${OPTARG}" == "vca" ] && continue
- [ "${OPTARG}" == "ha" ] && continue
- [ "${OPTARG}" == "tag" ] && continue
- [ "${OPTARG}" == "registry" ] && continue
- [ "${OPTARG}" == "nocachelxdimages" ] && continue
- [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="--cachelxdimages" && continue
echo -e "Invalid option: '--$OPTARG'\n" >&2
usage && exit 1
;;
@@ -831,13 +628,8 @@ source $OSM_DEVOPS/common/all_funcs
# Uninstall if "--uninstall"
if [ -n "$UNINSTALL" ]; then
- if [ -n "$CHARMED" ]; then
- ${OSM_DEVOPS}/installers/charmed_uninstall.sh -R $RELEASE -r $REPOSITORY -u $REPOSITORY_BASE -D $OSM_DEVOPS -t $DOCKER_TAG "$@" || \
- FATAL_TRACK charmed_uninstall "charmed_uninstall.sh failed"
- else
- ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
- FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
- fi
+ ${OSM_DEVOPS}/installers/uninstall_osm.sh "$@" || \
+ FATAL_TRACK community_uninstall "uninstall_osm.sh failed"
echo -e "\nDONE"
exit 0
fi
diff --git a/installers/install_juju.sh b/installers/install_juju.sh
deleted file mode 100755
index 7be5f99c..00000000
--- a/installers/install_juju.sh
+++ /dev/null
@@ -1,283 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-function usage(){
- [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
- echo -e "usage: $0 [OPTIONS]"
- echo -e "Install Juju for OSM"
- echo -e " OPTIONS"
- echo -e " -h / --help: print this help"
- echo -e " -D use local devops installation path"
- echo -e " -s or user defined stack name when installed using swarm or namespace when installed using k8s, default is osm"
- echo -e " -H use specific juju host controller IP"
- echo -e " -S use VCA/juju secret key"
- echo -e " -P use VCA/juju public key file"
- echo -e " -l: LXD cloud yaml file"
- echo -e " -L: LXD credentials yaml file"
- echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
- echo -e " --debug: debug mode"
- echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
- [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
-}
-
-function update_juju_images(){
- [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
- crontab -l | grep update-juju-lxc-images || (crontab -l 2>/dev/null; echo "0 4 * * 6 $USER ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic") | crontab -
- ${OSM_DEVOPS}/installers/update-juju-lxc-images --xenial --bionic
- [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
-}
-
-function install_juju_client() {
- [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
- echo "Installing juju client"
- sudo snap install juju --classic --channel=$JUJU_VERSION/stable
- [[ ":$PATH": != *":/snap/bin:"* ]] && PATH="/snap/bin:${PATH}"
- [ -n "$INSTALL_CACHELXDIMAGES" ] && update_juju_images
- echo "Finished installation of juju client"
- [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
- return 0
-}
-
-function juju_createcontroller_k8s(){
- [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
- cat $HOME/.kube/config | juju add-k8s $OSM_VCA_K8S_CLOUDNAME --client \
- || FATAL_TRACK juju "Failed to add K8s endpoint and credential for client in cloud $OSM_VCA_K8S_CLOUDNAME"
-
- JUJU_BOOTSTRAP_OPTS=""
- if [ -n "${OSM_BEHIND_PROXY}" ] ; then
- K8S_SVC_CLUSTER_IP=$(kubectl get svc/kubernetes -o jsonpath='{.spec.clusterIP}')
- NO_PROXY="${NO_PROXY},${K8S_SVC_CLUSTER_IP},.svc,.cluster.local"
- mkdir -p /tmp/.osm
- JUJU_MODEL_CONFIG_FILE=/tmp/.osm/model-config.yaml
- cat << EOF > $JUJU_MODEL_CONFIG_FILE
-apt-http-proxy: ${HTTP_PROXY}
-apt-https-proxy: ${HTTPS_PROXY}
-juju-http-proxy: ${HTTP_PROXY}
-juju-https-proxy: ${HTTPS_PROXY}
-juju-no-proxy: ${NO_PROXY}
-snap-http-proxy: ${HTTP_PROXY}
-snap-https-proxy: ${HTTPS_PROXY}
-EOF
- JUJU_BOOTSTRAP_OPTS="--model-default /tmp/.osm/model-config.yaml"
- fi
- juju bootstrap -v --debug $OSM_VCA_K8S_CLOUDNAME $OSM_NAMESPACE \
- --config controller-service-type=loadbalancer \
- --agent-version=$JUJU_AGENT_VERSION \
- ${JUJU_BOOTSTRAP_OPTS} \
- || FATAL_TRACK juju "Failed to bootstrap controller $OSM_NAMESPACE in cloud $OSM_VCA_K8S_CLOUDNAME"
- [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
-}
-
-function juju_addlxd_cloud(){
- [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
- mkdir -p /tmp/.osm
- OSM_VCA_CLOUDNAME="lxd-cloud"
- LXDENDPOINT=$DEFAULT_IP
- LXD_CLOUD=/tmp/.osm/lxd-cloud.yaml
- LXD_CREDENTIALS=/tmp/.osm/lxd-credentials.yaml
-
- cat << EOF > $LXD_CLOUD
-clouds:
- $OSM_VCA_CLOUDNAME:
- type: lxd
- auth-types: [certificate]
- endpoint: "https://$LXDENDPOINT:8443"
- config:
- ssl-hostname-verification: false
-EOF
- openssl req -nodes -new -x509 -keyout /tmp/.osm/client.key -out /tmp/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
- cat << EOF > $LXD_CREDENTIALS
-credentials:
- $OSM_VCA_CLOUDNAME:
- lxd-cloud:
- auth-type: certificate
- server-cert: /var/snap/lxd/common/lxd/server.crt
- client-cert: /tmp/.osm/client.crt
- client-key: /tmp/.osm/client.key
-EOF
- lxc config trust add local: /tmp/.osm/client.crt
- juju add-cloud -c $OSM_NAMESPACE $OSM_VCA_CLOUDNAME $LXD_CLOUD --force
- juju add-credential -c $OSM_NAMESPACE $OSM_VCA_CLOUDNAME -f $LXD_CREDENTIALS
- sg lxd -c "lxd waitready"
- juju controller-config features=[k8s-operators]
- if [ -n "${OSM_BEHIND_PROXY}" ] ; then
- if [ -n "${HTTP_PROXY}" ]; then
- juju model-default lxd-cloud apt-http-proxy="$HTTP_PROXY"
- juju model-default lxd-cloud juju-http-proxy="$HTTP_PROXY"
- juju model-default lxd-cloud snap-http-proxy="$HTTP_PROXY"
- fi
- if [ -n "${HTTPS_PROXY}" ]; then
- juju model-default lxd-cloud apt-https-proxy="$HTTPS_PROXY"
- juju model-default lxd-cloud juju-https-proxy="$HTTPS_PROXY"
- juju model-default lxd-cloud snap-https-proxy="$HTTPS_PROXY"
- fi
- [ -n "${NO_PROXY}" ] && juju model-default lxd-cloud juju-no-proxy="$NO_PROXY"
- fi
- [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
-}
-
-#Safe unattended install of iptables-persistent
-function check_install_iptables_persistent(){
- [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
- echo -e "\nChecking required packages: iptables-persistent"
- if ! dpkg -l iptables-persistent &>/dev/null; then
- echo -e " Not installed.\nInstalling iptables-persistent requires root privileges"
- echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
- echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
- sudo apt-get -yq install iptables-persistent
- fi
- [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
-}
-
-function juju_createproxy() {
- [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
- check_install_iptables_persistent
-
- if ! sudo iptables -t nat -C PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST; then
- sudo iptables -t nat -A PREROUTING -p tcp -m tcp -d $DEFAULT_IP --dport 17070 -j DNAT --to-destination $OSM_VCA_HOST
- sudo netfilter-persistent save
- fi
- [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
-}
-
-DEBUG_INSTALL=""
-INSTALL_CACHELXDIMAGES=""
-INSTALL_NOJUJU=""
-JUJU_AGENT_VERSION=2.9.43
-JUJU_VERSION=2.9
-OSM_BEHIND_PROXY=""
-OSM_DEVOPS=
-OSM_NAMESPACE=osm
-OSM_VCA_HOST=
-OSM_VCA_CLOUDNAME="localhost"
-OSM_VCA_K8S_CLOUDNAME="k8scloud"
-RE_CHECK='^[a-z0-9]([-a-z0-9]*[a-z0-9])?$'
-
-while getopts ":D:i:s:H:l:L:K:-: hP" o; do
- case "${o}" in
- D)
- OSM_DEVOPS="${OPTARG}"
- ;;
- i)
- DEFAULT_IP="${OPTARG}"
- ;;
- s)
- OSM_NAMESPACE="${OPTARG}" && [[ ! "${OPTARG}" =~ $RE_CHECK ]] && echo "Namespace $OPTARG is invalid. Regex used for validation is $RE_CHECK" && exit 0
- ;;
- H)
- OSM_VCA_HOST="${OPTARG}"
- ;;
- l)
- LXD_CLOUD_FILE="${OPTARG}"
- ;;
- L)
- LXD_CRED_FILE="${OPTARG}"
- ;;
- K)
- CONTROLLER_NAME="${OPTARG}"
- ;;
- P)
- OSM_BEHIND_PROXY="y"
- ;;
- -)
- [ "${OPTARG}" == "help" ] && usage && exit 0
- [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="--debug" && continue
- [ "${OPTARG}" == "cachelxdimages" ] && INSTALL_CACHELXDIMAGES="y" && continue
- echo -e "Invalid option: '--$OPTARG'\n" >&2
- usage && exit 1
- ;;
- :)
- echo "Option -$OPTARG requires an argument" >&2
- usage && exit 1
- ;;
- \?)
- echo -e "Invalid option: '-$OPTARG'\n" >&2
- usage && exit 1
- ;;
- h)
- usage && exit 0
- ;;
- *)
- usage && exit 1
- ;;
- esac
-done
-
-source $OSM_DEVOPS/common/logging
-source $OSM_DEVOPS/common/track
-
-echo "DEBUG_INSTALL=$DEBUG_INSTALL"
-echo "DEFAULT_IP=$DEFAULT_IP"
-echo "OSM_BEHIND_PROXY=$OSM_BEHIND_PROXY"
-echo "OSM_DEVOPS=$OSM_DEVOPS"
-echo "HOME=$HOME"
-
-[ -z "$INSTALL_NOJUJU" ] && install_juju_client
-track juju juju_client_ok
-
-if [ -z "$OSM_VCA_HOST" ]; then
- if [ -z "$CONTROLLER_NAME" ]; then
- juju_createcontroller_k8s
- juju_addlxd_cloud
- if [ -n "$LXD_CLOUD_FILE" ]; then
- [ -z "$LXD_CRED_FILE" ] && FATAL_TRACK juju "The installer needs the LXD credential yaml if the LXD is external"
- OSM_VCA_CLOUDNAME="lxd-cloud"
- juju add-cloud $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud $OSM_VCA_CLOUDNAME --client -f $LXD_CLOUD_FILE
- juju add-credential $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential $OSM_VCA_CLOUDNAME lxd-cloud-creds -f $LXD_CRED_FILE
- fi
- juju_createproxy
- else
- OSM_VCA_CLOUDNAME="lxd-cloud"
- if [ -n "$LXD_CLOUD_FILE" ]; then
- [ -z "$LXD_CRED_FILE" ] && FATAL_TRACK juju "The installer needs the LXD credential yaml if the LXD is external"
- juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME $LXD_CLOUD_FILE --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f $LXD_CLOUD_FILE
- juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f $LXD_CRED_FILE || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f $LXD_CRED_FILE
- else
- mkdir -p ~/.osm
- cat << EOF > ~/.osm/lxd-cloud.yaml
-clouds:
- lxd-cloud:
- type: lxd
- auth-types: [certificate]
- endpoint: "https://$DEFAULT_IP:8443"
- config:
- ssl-hostname-verification: false
-EOF
- openssl req -nodes -new -x509 -keyout ~/.osm/client.key -out ~/.osm/client.crt -days 365 -subj "/C=FR/ST=Nice/L=Nice/O=ETSI/OU=OSM/CN=osm.etsi.org"
- local server_cert=`cat /var/snap/lxd/common/lxd/server.crt | sed 's/^/ /'`
- local client_cert=`cat ~/.osm/client.crt | sed 's/^/ /'`
- local client_key=`cat ~/.osm/client.key | sed 's/^/ /'`
- cat << EOF > ~/.osm/lxd-credentials.yaml
-credentials:
- lxd-cloud:
- lxd-cloud:
- auth-type: certificate
- server-cert: |
-$server_cert
- client-cert: |
-$client_cert
- client-key: |
-$client_key
-EOF
- lxc config trust add local: ~/.osm/client.crt
- juju add-cloud -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME ~/.osm/lxd-cloud.yaml --force || juju update-cloud lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-cloud.yaml
- juju add-credential -c $CONTROLLER_NAME $OSM_VCA_CLOUDNAME -f ~/.osm/lxd-credentials.yaml || juju update-credential lxd-cloud -c $CONTROLLER_NAME -f ~/.osm/lxd-credentials.yaml
- fi
- fi
- [ -z "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`sg lxd -c "juju show-controller $OSM_NAMESPACE"|grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
- [ -n "$CONTROLLER_NAME" ] && OSM_VCA_HOST=`juju show-controller $CONTROLLER_NAME |grep api-endpoints|awk -F\' '{print $2}'|awk -F\: '{print $1}'`
- [ -z "$OSM_VCA_HOST" ] && FATAL_TRACK juju "Cannot obtain juju controller IP address"
-fi
-track juju juju_controller_ok
diff --git a/installers/install_lxd.sh b/installers/install_lxd.sh
deleted file mode 100755
index 60cf91eb..00000000
--- a/installers/install_lxd.sh
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-set +eux
-
-function usage(){
- [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
- echo -e "usage: $0 [OPTIONS]"
- echo -e "Install Juju for OSM"
- echo -e " OPTIONS"
- echo -e " -h / --help: print this help"
- echo -e " -D use local devops installation path"
- echo -e " -H use specific juju host controller IP"
- echo -e " -S use VCA/juju secret key"
- echo -e " -P use VCA/juju public key file"
- echo -e " -l: LXD cloud yaml file"
- echo -e " -L: LXD credentials yaml file"
- echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
- echo -e " --debug: debug mode"
- echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
- echo -e " --nojuju: do not juju, assumes already installed"
- [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
-}
-
-function install_lxd() {
- [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
- # Apply sysctl production values for optimal performance
- sudo cp ${OSM_DEVOPS}/installers/lxd/60-lxd-production.conf /etc/sysctl.d/60-lxd-production.conf
- sudo sysctl --system
-
- # Install LXD snap
- sudo apt-get remove --purge -y liblxc1 lxc-common lxcfs lxd lxd-client
- snap info lxd | grep installed > /dev/null
- if [ $? -eq 0 ]; then
- sudo snap refresh lxd --channel $LXD_VERSION/stable
- else
- sudo snap install lxd --channel $LXD_VERSION/stable
- fi
-
- # Get default iface, IP and MTU
- if [ -n "${OSM_DEFAULT_IF}" ]; then
- OSM_DEFAULT_IF=$(ip route list|awk '$1=="default" {print $5; exit}')
- [ -z "${OSM_DEFAULT_IF}" ] && OSM_DEFAULT_IF=$(route -n |awk '$1~/^0.0.0.0/ {print $8; exit}')
- [ -z "${OSM_DEFAULT_IF}" ] && FATAL_TRACK lxd "Not possible to determine the interface with the default route 0.0.0.0"
- fi
- DEFAULT_MTU=$(ip addr show ${OSM_DEFAULT_IF} | perl -ne 'if (/mtu\s(\d+)/) {print $1;}')
- OSM_DEFAULT_IP=`ip -o -4 a s ${OSM_DEFAULT_IF} |awk '{split($4,a,"/"); print a[1]; exit}'`
- [ -z "$OSM_DEFAULT_IP" ] && FATAL_TRACK lxd "Not possible to determine the IP address of the interface with the default route"
-
- # Configure LXD
- sudo usermod -a -G lxd `whoami`
- cat ${OSM_DEVOPS}/installers/lxd/lxd-preseed.conf | sed 's/^config: {}/config:\n core.https_address: '$OSM_DEFAULT_IP':8443/' | sg lxd -c "lxd init --preseed"
- sg lxd -c "lxd waitready"
-
- # Configure LXD to work behind a proxy
- if [ -n "${OSM_BEHIND_PROXY}" ] ; then
- [ -n "${HTTP_PROXY}" ] && sg lxd -c "lxc config set core.proxy_http $HTTP_PROXY"
- [ -n "${HTTPS_PROXY}" ] && sg lxd -c "lxc config set core.proxy_https $HTTPS_PROXY"
- [ -n "${NO_PROXY}" ] && sg lxd -c "lxc config set core.proxy_ignore_hosts $NO_PROXY"
- fi
-
- [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
-}
-
-DEBUG_INSTALL=""
-LXD_VERSION=5.0
-OSM_DEVOPS=
-OSM_BEHIND_PROXY=""
-
-# main
-while getopts ":D:d:i:-: hP" o; do
- case "${o}" in
- i)
- OSM_DEFAULT_IF="${OPTARG}"
- ;;
- d)
- OSM_DOCKER_WORK_DIR="${OPTARG}"
- ;;
- D)
- OSM_DEVOPS="${OPTARG}"
- ;;
- P)
- OSM_BEHIND_PROXY="y"
- ;;
- -)
- [ "${OPTARG}" == "help" ] && usage && exit 0
- [ "${OPTARG}" == "debug" ] && DEBUG_INSTALL="y" && continue
- echo -e "Invalid option: '--$OPTARG'\n" >&2
- exit 1
- ;;
- :)
- echo "Option -$OPTARG requires an argument" >&2
- exit 1
- ;;
- \?)
- echo -e "Invalid option: '-$OPTARG'\n" >&2
- exit 1
- ;;
- h)
- usage && exit 0
- ;;
- *)
- exit 1
- ;;
- esac
-done
-
-source $OSM_DEVOPS/common/logging
-source $OSM_DEVOPS/common/track
-
-echo "DEBUG_INSTALL=$DEBUG_INSTALL"
-echo "OSM_BEHIND_PROXY=$OSM_BEHIND_PROXY"
-echo "OSM_DEFAULT_IF=$OSM_DEFAULT_IF"
-echo "OSM_DEVOPS=$OSM_DEVOPS"
-
-[ -z "$INSTALL_NOJUJU" ] && install_lxd
-track prereq lxd_install_ok
-
diff --git a/installers/install_osm.sh b/installers/install_osm.sh
index 816f39d2..d46b7d43 100755
--- a/installers/install_osm.sh
+++ b/installers/install_osm.sh
@@ -25,7 +25,7 @@ function usage(){
echo -e " -h / --help: print this help"
echo -e " -y: do not prompt for confirmation, assumes yes"
echo -e " -r : use specified repository name for osm packages"
- echo -e " -R : use specified release for osm binaries (deb packages, lxd images, ...)"
+ echo -e " -R : use specified release for osm binaries (deb packages, ...)"
echo -e " -u : use specified repository url for osm packages"
echo -e " -k : use specified repository public key url"
echo -e " -a : use this apt proxy url when downloading apt packages (air-gapped installation)"
@@ -37,40 +37,17 @@ function usage(){
echo -e " --no-aux-cluster: Do not provision an auxiliary cluster for cloud-native gitops operations in OSM (NEW in Release SIXTEEN) (by default, it is installed)"
echo -e " -D : use local devops installation path"
echo -e " -s namespace when installed using k8s, default is osm"
- echo -e " -H use specific juju host controller IP"
- echo -e " -S use VCA/juju secret key"
- echo -e " -P use VCA/juju public key file"
- echo -e " -A use VCA/juju API proxy"
echo -e " -w : Location to store runtime installation"
- echo -e " -l: LXD cloud yaml file"
- echo -e " -L: LXD credentials yaml file"
echo -e " -K: Specifies the name of the controller to use - The controller must be already bootstrapped"
echo -e " -d use docker registry URL instead of dockerhub"
echo -e " -p set docker proxy URL as part of docker CE configuration"
echo -e " -T specify docker tag for the modules specified with option -m"
echo -e " --debug: debug mode"
- echo -e " --nocachelxdimages: do not cache local lxd images, do not create cronjob for that cache (will save installation time, might affect instantiation time)"
- echo -e " --cachelxdimages: cache local lxd images, create cronjob for that cache (will make installation longer)"
- echo -e " --nolxd: do not install and configure LXD, allowing unattended installations (assumes LXD is already installed and confifured)"
echo -e " --nodocker: do not install docker, do not initialize a swarm (assumes docker is already installed and a swarm has been initialized)"
- echo -e " --nojuju: do not juju, assumes already installed"
- echo -e " --nohostports: do not expose docker ports to host (useful for creating multiple instances of osm on the same host)"
echo -e " --nohostclient: do not install the osmclient"
echo -e " --uninstall: uninstall OSM: remove the containers and delete NAT rules"
echo -e " --k8s_monitor: install the OSM kubernetes monitoring with prometheus and grafana"
echo -e " --showopts: print chosen options and exit (only for debugging)"
- echo -e " --charmed: Deploy and operate OSM with Charms on k8s"
- echo -e " [--bundle ]: Specify with which bundle to deploy OSM with charms (--charmed option)"
- echo -e " [--k8s ]: Specify with which kubernetes to deploy OSM with charms (--charmed option)"
- echo -e " [--vca ]: Specifies the name of the controller to use - The controller must be already bootstrapped (--charmed option)"
- echo -e " [--small-profile]: Do not install and configure LXD which aims to use only K8s Clouds (--charmed option)"
- echo -e " [--lxd ]: Takes a YAML file as a parameter with the LXD Cloud information (--charmed option)"
- echo -e " [--lxd-cred ]: Takes a YAML file as a parameter with the LXD Credentials information (--charmed option)"
- echo -e " [--microstack]: Installs microstack as a vim. (--charmed option)"
- echo -e " [--overlay]: Add an overlay to override some defaults of the default bundle (--charmed option)"
- echo -e " [--ha]: Installs High Availability bundle. (--charmed option)"
- echo -e " [--tag]: Docker image tag. (--charmed option)"
- echo -e " [--registry]: Docker registry with optional credentials as user:pass@hostname:port (--charmed option)"
}
add_repo() {
@@ -126,7 +103,7 @@ EOF"
[ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
-while getopts ":a:c:e:r:n:k:u:R:D:o:O:N:H:S:s:t:U:P:A:l:L:K:d:p:T:f:F:G:M:-: hy" o; do
+while getopts ":a:c:e:r:n:k:u:R:D:o:O:N:s:t:U:l:L:K:d:p:T:f:F:G:M:-: hy" o; do
case "${o}" in
D)
diff --git a/installers/uninstall_osm.sh b/installers/uninstall_osm.sh
index 1aa9f365..a57c60f5 100755
--- a/installers/uninstall_osm.sh
+++ b/installers/uninstall_osm.sh
@@ -28,12 +28,6 @@ function remove_volumes() {
[ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
}
-function remove_crontab_job() {
- [ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
- crontab -l | grep -v '${OSM_DEVOPS}/installers/update-juju-lxc-images' | crontab -
- [ -z "${DEBUG_INSTALL}" ] || DEBUG end of function
-}
-
function uninstall_k8s_monitoring() {
[ -z "${DEBUG_INSTALL}" ] || DEBUG beginning of function
# uninstall OSM monitoring
@@ -72,8 +66,6 @@ EONG
[ -z "$CONTROLLER_NAME" ] && sg lxd -c "juju kill-controller -t 0 -y $OSM_NAMESPACE"
- remove_crontab_job
-
# Cleanup Openstack installer venv
if [ -d "$OPENSTACK_PYTHON_VENV" ]; then
rm -r $OPENSTACK_PYTHON_VENV
diff --git a/installers/update-juju-lxc-images b/installers/update-juju-lxc-images
deleted file mode 100755
index 18f85c98..00000000
--- a/installers/update-juju-lxc-images
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/bin/bash
-# Copyright 2019 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# This script will create lxd images that will be used by the
-# lxd provider in juju 2.1+ It is for use with the lxd provider for local
-# development and preinstalls a common set of production packages.
-#
-# This is important, as between them, basenode and layer-basic install ~111
-# packages, before we even get to any packages installed by your charm.
-#
-# It also installs some helpful development tools, and pre-downloads some
-# commonly used packages.
-#
-# This dramatically speeds up the install hooks for lxd deploys. On my slow
-# laptop, average install hook time went from ~7min down to ~1 minute.
-function usage() {
- echo -e "usage: update-juju-lxc-images [Optional flags]"
- echo -e "This script will automatically cache all LTS series by default (trusty, xenial, bionic)"
- echo -e ""
- echo -e "Optional flags"
- echo -e "=================="
- echo -e "--trusty It will download only the trusty series"
- echo -e "--xenial It will download only the xenial series"
- echo -e "--bionic It will download only the bionic series"
- echo -e ""
- echo -e "Help flags"
- echo -e "=================="
- echo -e "-h | --help Print full help."
- exit
-}
-
-FLAGS=0
-trusty=0
-xenial=0
-bionic=0
-while :; do
- case $1 in
- --trusty)
- FLAGS=1
- trusty=1
- ;;
- --xenial)
- FLAGS=1
- xenial=1
- ;;
- --bionic)
- FLAGS=1
- bionic=1
- ;;
- -h|--help)
- usage
- ;;
- *)
- break
- esac
- shift
-done
-
-
-set -eux
-
-# The basic charm layer also installs all the things. 47 packages.
-LAYER_BASIC="gcc build-essential python3-pip python3-setuptools python3-yaml"
-
-# the basic layer also installs virtualenv, but the name changed in xenial.
-TRUSTY_PACKAGES="python-virtualenv"
-XENIAL_PACKAGES="virtualenv"
-BIONIC_PACKAGES="virtualenv"
-
-# Predownload common packages used by your charms in development
-DOWNLOAD_PACKAGES=
-
-CLOUD_INIT_PACKAGES="curl cpu-checker bridge-utils cloud-utils tmux ubuntu-fan"
-
-PACKAGES="$LAYER_BASIC $DOWNLOAD_PACKAGES"
-
-JUJU_FULL_VERSION=`juju version` # 2.4.4-bionic-amd64
-JUJU_VERSION=`echo $JUJU_FULL_VERSION | awk -F"-" '{print $1}'`
-OS_VERSION=`echo $JUJU_FULL_VERSION | awk -F"-" '{print $2}'`
-ARCH=`echo $JUJU_FULL_VERSION | awk -F"-" '{print $3}'`
-
-function cache() {
- series=$1
- container=juju-${series}-base
- alias=juju/$series/amd64
-
- lxc delete $container -f || true
- lxc image copy ubuntu:$series local: --alias clean-$series
- lxc launch ubuntu:$series $container
- sleep 15 # wait for network
-
- lxc exec $container -- apt-get update -y
- lxc exec $container -- apt-get upgrade -y
- lxc exec $container -- apt-get install -y $CLOUD_INIT_PACKAGES $PACKAGES $2
-
- # Install juju agent
- echo "Installing Juju agent $JUJU_FULL_VERSION"
- # TODO: verify if the version exists
-
- lxc exec $container -- mkdir -p /var/lib/juju/tools/$JUJU_FULL_VERSION/
-
- lxc exec $container -- curl -sS --connect-timeout 20 --noproxy \* --insecure -o /var/lib/juju/tools/$JUJU_FULL_VERSION/tools.tar.gz https://streams.canonical.com/juju/tools/agent/$JUJU_VERSION/juju-$JUJU_VERSION-ubuntu-$ARCH.tgz
-
- lxc exec $container -- tar zxf /var/lib/juju/tools/$JUJU_FULL_VERSION/tools.tar.gz -C /var/lib/juju/tools/$JUJU_FULL_VERSION || true
-
- # Cache pip packages so installation into venv is faster?
- # pip3 download --cache-dir ~/.cache/pip charmhelpers
-
- lxc stop $container
-
- lxc image delete $alias || true
- lxc image delete clean-$series || true
- lxc publish $container --alias $alias description="$series juju dev image ($(date +%Y%m%d))"
-
- lxc delete $container -f || true
-}
-
-# Enable caching of the serie(s) you're developing for.
-if [ $FLAGS == 0 ]; then
- cache xenial "$XENIAL_PACKAGES"
-else
- [ $trusty == 1 ] && cache trusty "$TRUSTY_PACKAGES"
- [ $xenial == 1 ] && cache xenial "$XENIAL_PACKAGES"
- [ $bionic == 1 ] && cache bionic "$BIONIC_PACKAGES"
-fi
diff --git a/tools/debug/charmed/README.md b/tools/debug/charmed/README.md
deleted file mode 100644
index 93bf7ee6..00000000
--- a/tools/debug/charmed/README.md
+++ /dev/null
@@ -1,147 +0,0 @@
-
-
-# Debugging Charmed OSM
-
-This document aims to provide the OSM community an easy way of testing and debugging OSM.
-
-Benefits:
-
-- Use upstream published images for debugging: No need to build local images anymore.
-- Easily configure modules for debugging_mode: `juju config debug_mode=True debug_pubkey="ssh-rsa ..."`.
-- Debug in K8s: All pods (the debugged ones and the rest) will be running always in K8s.
-- Seemless setup: VSCode will connect through SSH to the pods.
-- Keep your changes save: Possibility to mount local module to the container; all the changes will be saved automatically to your local filesystem.
-
-## Install OSM
-
-Download the installer:
-
-```bash
-wget http://osm-download.etsi.org/ftp/osm-10.0-ten/install_osm.sh
-chmod +x install_osm.sh
-```
-
-Install OSM from master (tag=testing-daily):
-
-```bash
-./install_osm.sh -R testing-daily -r testing --charmed
-```
-
-Install OSM from a specific tag:
-
-```bash
-./install_osm.sh -R testing-daily -r testing --charmed --tag
-```
-
-## Debugging
-
-Once the Charmed OSM installation has finished, you can select which applications you want to run with the debug mode.
-
-```bash
-# LCM
-juju config lcm debug_mode=True debug_pubkey="`cat ~/.ssh/id_rsa.pub`"
-# MON
-juju config mon debug_mode=True debug_pubkey="`cat ~/.ssh/id_rsa.pub`"
-# NBI
-juju config nbi debug_mode=True debug_pubkey="`cat ~/.ssh/id_rsa.pub`"
-# RO
-juju config ro debug_mode=True debug_pubkey="`cat ~/.ssh/id_rsa.pub`"
-# POL
-juju config pol debug_mode=True debug_pubkey="`cat ~/.ssh/id_rsa.pub`"
-```
-
-Enabling the debug_mode will put a `sleep infinity` as the entrypoint of the container. That way, we can later connect to the pod through SSH in VSCode, and run the entrypoint of the application from the debugger.
-
-### Mounting local modules
-
-The Charmed OSM Debugging mode allows you to mount local modules to the desired charms. The following commands show which modules can be mounted in each charm.
-
-```bash
-LCM_LOCAL_PATH="/path/to/LCM"
-N2VC_LOCAL_PATH="/path/to/N2VC"
-NBI_LOCAL_PATH="/path/to/NBI"
-RO_LOCAL_PATH="/path/to/RO"
-MON_LOCAL_PATH="/path/to/MON"
-POL_LOCAL_PATH="/path/to/POL"
-COMMON_LOCAL_PATH="/path/to/common"
-
-# LCM
-juju config lcm debug_lcm_local_path=$LCM_LOCAL_PATH
-juju config lcm debug_n2vc_local_path=$N2VC_LOCAL_PATH
-juju config lcm debug_common_local_path=$COMMON_LOCAL_PATH
-# MON
-juju config mon debug_mon_local_path=$MON_LOCAL_PATH
-juju config mon debug_n2vc_local_path=$N2VC_LOCAL_PATH
-juju config mon debug_common_local_path=$COMMON_LOCAL_PATH
-# NBI
-juju config nbi debug_nbi_local_path=$LCM_LOCAL_PATH
-juju config nbi debug_common_local_path=$COMMON_LOCAL_PATH
-# RO
-juju config ro debug_ro_local_path=$RO_LOCAL_PATH
-juju config ro debug_common_local_path=$COMMON_LOCAL_PATH
-# POL
-juju config pol debug_pol_local_path=$POL_LOCAL_PATH
-juju config pol debug_common_local_path=$COMMON_LOCAL_PATH
-```
-
-### Generate SSH config file
-
-Preparing the pods includes setting up the `~/.ssh/config` so the VSCode can easily discover which ssh hosts are available
-
-Just execute:
-
-```bash
-./generate_ssh_config.sh
-```
-
-> NOTE: The public key that will be used will be `$HOME/.ssh/id_rsa.pub`. If you want to use a different one, add the absolute path to it as a first argument: `./generate_ssh_config.sh /path/to/key.pub`.
-
-### Connect to Pods
-
-In VScode, navigate to [Remote Explorer](https://code.visualstudio.com/docs/remote/ssh#_remember-hosts-and-advanced-settings), and select the pod to which you want to connect.
-
-You should be able to see the following hosts in the Remote Explorer:
-
-- lcm
-- mon
-- nbi
-- ro
-- pol
-
-Right click on the host, and "Connect to host in a New Window".
-
-### Add workspace
-
-The `./generate_ssh_config.sh` script adds a workspace to the `/root` folder of each pod, with the following name: `debug.code-workspace`.
-
-In the window of the connected host, go to `File/Open Workspace from File...`. Then select the `debug.code-workspace` file.
-
-### Run and Debug
-
-Open the `Terminal` tab, and the Python extension will be automatically downloaded. It will be installed in the remote pod.
-
-Go to the `Explorer (ctrl + shift + E)` to see the module folders in the charm. You can add breakpoints and start debugging.
-
-Go to the `Run and Debug (ctrl + shift + D)` and press `F5` to start the main entrypoint of the charm.
-
-Happy debugging!
diff --git a/tools/debug/charmed/generate_ssh_config.sh b/tools/debug/charmed/generate_ssh_config.sh
deleted file mode 100755
index 58d06861..00000000
--- a/tools/debug/charmed/generate_ssh_config.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-MODULES="lcm pol mon ro nbi"
-
-
-PRIVATE_KEY=${1:-$HOME/.ssh/id_rsa}
-echo "Using $PRIVATE_KEY key."
-[ -f $PRIVATE_KEY ] || (echo "$PRIVATE_KEY file does not exist" && exit 1)
-PRIVATE_KEY_CONTENT=`cat $PRIVATE_KEY`
-
-mkdir -p ~/.ssh/config.d
-echo "" | tee ~/.ssh/config.d/osm
-
-
-for module in $MODULES; do
- if [[ `juju config -m osm $module debug_mode` == "true" ]]; then
- pod_name=`microk8s.kubectl -n osm get pods | grep -E "^$module-" | grep -v operator | cut -d " " -f 1`
- pod_ip=`microk8s.kubectl -n osm get pods $pod_name -o yaml | yq e .status.podIP -`
- echo "Host $module
- HostName $pod_ip
- User root
- # StrictHostKeyChecking no
- IdentityFile $PRIVATE_KEY" | tee -a ~/.ssh/config.d/osm
- fi
-done
-
-
-import_osm_config="Include config.d/osm"
-touch ~/.ssh/config
-grep "$import_osm_config" ~/.ssh/config || ( echo -e "$import_osm_config\n$(cat ~/.ssh/config)" > ~/.ssh/config )
\ No newline at end of file
diff --git a/tools/promote-charms-and-snaps.sh b/tools/promote-charms-and-snaps.sh
deleted file mode 100755
index 1ace0dc9..00000000
--- a/tools/promote-charms-and-snaps.sh
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-CHANNEL=${1:-latest}
-SOURCE=${2:-beta}
-TARGET=${3:-candidate}
-echo "==========================================================="
-echo Promoting charms and snaps from $SOURCE to $TARGET
-echo ""
-
-for snap in osmclient ; do
-
- echo "==========================================================="
- echo "${snap}"
-
- track="${CHANNEL}/${SOURCE}\\*"
- SOURCE_REV=$(snapcraft revisions $snap | grep $track | tail -1 | awk '{print $1}')
- track="${CHANNEL}/${TARGET}\\*"
- TARGET_REV=$(snapcraft revisions $snap | grep $track | tail -1 | awk '{print $1}')
-
- echo "$SOURCE: $SOURCE_REV, $TARGET: $TARGET_REV"
-
- if [ -z $TARGET_REV ] || [ $SOURCE_REV -ne $TARGET_REV ]; then
- echo "Promoting $SOURCE_REV to beta in place of $TARGET_REV"
- track="${CHANNEL}/${TARGET}"
- snapcraft release $snap $SOURCE_REV $track
- fi
-
-done
-
-for charm in \
- 'osm' \
- 'osm-ha' \
- 'osm-grafana' \
- 'mongodb-exporter-k8s' \
- 'mysqld-exporter-k8s' \
- 'osm-lcm' \
- 'osm-mon' \
- 'osm-nbi' \
- 'osm-ng-ui' \
- 'osm-pol' \
- 'osm-ro' \
- 'osm-prometheus' \
- 'osm-vca-integrator' ; do
-
- echo "==========================================================="
- echo "${charm}"
-
- charmcraft status $charm --format json > ${charm}.json
- isCharm=$(grep architecture ${charm}.json | wc -l 2>/dev/null)
- resourceArgument=""
-
- if [ $isCharm -gt 0 ]; then
- base=20.04
- is2204=$(cat ${charm}.json | jq -r ".[] | select(.track==\"$CHANNEL\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"22.04\")"|wc -l)
- if [ $is2204 -gt 0 ]; then
- base=22.04
- fi
-
-
- SOURCE_REV=$(cat ${charm}.json | jq -r ".[] | select(.track==\"$CHANNEL\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"$base\") | .releases[] | select(.channel==\"$CHANNEL/$SOURCE\")| .version"|head -1)
- TARGET_REV=$(cat ${charm}.json | jq -r ".[] | select(.track==\"$CHANNEL\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"$base\") | .releases[] | select(.channel==\"$CHANNEL/$TARGET\")| .version"|head -1)
-
-
- index=0
- while [ $index -lt 5 ]; do
- resourceName=$(cat ${charm}.json | jq -r ".[] | select(.track==\"$CHANNEL\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"$base\") | .releases[] | select(.channel==\"$CHANNEL/$SOURCE\")| .resources[$index].name"|head -1)
- resourceRevs=$(cat ${charm}.json | jq -r ".[] | select(.track==\"$CHANNEL\") | .mappings[] | select(.base.architecture==\"amd64\" and .base.channel==\"$base\") | .releases[] | select(.channel==\"$CHANNEL/$SOURCE\")| .resources[$index].revision"|head -1)
- if [ "$resourceName" != "null" ] ; then
- resourceArgument=" $resourceArgument --resource ${resourceName}:${resourceRevs}"
- else
- break
- fi
- ((index=index+1))
- done
- else
- SOURCE_REV=$(cat ${charm}.json | jq -r ".[] | select(.track==\"$CHANNEL\") | .mappings[].releases[] | select(.channel==\"$CHANNEL/$SOURCE\")| .version"|head -1)
- TARGET_REV=$(cat ${charm}.json | jq -r ".[] | select(.track==\"$CHANNEL\") | .mappings[].releases[] | select(.channel==\"$CHANNEL/$TARGET\")| .version"|head -1)
- fi
-
- rm ${charm}.json
- echo "$SOURCE: $SOURCE_REV, $TARGET: $TARGET_REV $resourceArgument"
-
- if [ $TARGET_REV == "null" ] || [ $SOURCE_REV -gt $TARGET_REV ] ; then
- echo Promoting ${charm} revision ${SOURCE_REV} to ${TARGET} ${resourceArgument}
- charmcraft release ${charm} --revision=${SOURCE_REV} ${resourceArgument} --channel=${CHANNEL}/$TARGET
- fi
-
-done