From: sousaedu Date: Tue, 18 May 2021 15:28:17 +0000 (+0200) Subject: Moving exporter charms to use opslib X-Git-Tag: release-v10.0-start~1 X-Git-Url: https://osm.etsi.org/gitweb/?p=osm%2Fdevops.git;a=commitdiff_plain;h=1072160c684a35076746d7b789bb0040643d3a20 Moving exporter charms to use opslib This commit also includes external DB configuration option. Change-Id: Iddb4adfae582ecfc6af2d797716e386420ad1df8 Signed-off-by: sousaedu --- diff --git a/installers/charm/kafka-exporter/.gitignore b/installers/charm/kafka-exporter/.gitignore index 0933edca..2885df27 100644 --- a/installers/charm/kafka-exporter/.gitignore +++ b/installers/charm/kafka-exporter/.gitignore @@ -22,7 +22,9 @@ venv .vscode build -kafka-exporter.charm +*.charm .coverage +coverage.xml .stestr cover +release \ No newline at end of file diff --git a/installers/charm/kafka-exporter/.jujuignore b/installers/charm/kafka-exporter/.jujuignore new file mode 100644 index 00000000..3ae3e7dc --- /dev/null +++ b/installers/charm/kafka-exporter/.jujuignore @@ -0,0 +1,34 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: legal@canonical.com +# +# To get in touch with the maintainers, please contact: +# osm-charmers@lists.launchpad.net +## + +venv +.vscode +build +*.charm +.coverage +coverage.xml +.gitignore +.stestr +cover +release +tests/ +requirements* +tox.ini diff --git a/installers/charm/kafka-exporter/.yamllint.yaml b/installers/charm/kafka-exporter/.yamllint.yaml index f300159a..d71fb69f 100644 --- a/installers/charm/kafka-exporter/.yamllint.yaml +++ b/installers/charm/kafka-exporter/.yamllint.yaml @@ -28,4 +28,7 @@ yaml-files: - ".yamllint" ignore: | .tox + cover/ build/ + venv + release/ diff --git a/installers/charm/kafka-exporter/requirements-test.txt b/installers/charm/kafka-exporter/requirements-test.txt new file mode 100644 index 00000000..316f6d20 --- /dev/null +++ b/installers/charm/kafka-exporter/requirements-test.txt @@ -0,0 +1,21 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: legal@canonical.com +# +# To get in touch with the maintainers, please contact: +# osm-charmers@lists.launchpad.net + +mock==4.0.3 diff --git a/installers/charm/kafka-exporter/requirements.txt b/installers/charm/kafka-exporter/requirements.txt index 884cf9f9..8bb93ad3 100644 --- a/installers/charm/kafka-exporter/requirements.txt +++ b/installers/charm/kafka-exporter/requirements.txt @@ -19,5 +19,4 @@ # osm-charmers@lists.launchpad.net ## -ops -git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image +git+https://github.com/charmed-osm/ops-lib-charmed-osm/@master diff --git a/installers/charm/kafka-exporter/src/charm.py b/installers/charm/kafka-exporter/src/charm.py index 9f03a343..123fa0b2 100755 --- a/installers/charm/kafka-exporter/src/charm.py +++ b/installers/charm/kafka-exporter/src/charm.py @@ -20,203 +20,207 @@ # osm-charmers@lists.launchpad.net ## +# pylint: disable=E0213 + +from ipaddress import ip_network import logging from pathlib import Path -from typing import Dict, List, NoReturn +from typing import NoReturn, Optional from urllib.parse import urlparse -from ops.charm import CharmBase -from ops.framework import EventBase, StoredState from ops.main import main -from ops.model import ActiveStatus, Application, BlockedStatus, MaintenanceStatus, Unit -from oci_image import OCIImageResource, OCIImageResourceError +from opslib.osm.charm import CharmedOsmBase, RelationsMissing +from opslib.osm.interfaces.grafana import GrafanaDashboardTarget +from opslib.osm.interfaces.kafka import KafkaClient +from opslib.osm.interfaces.prometheus import PrometheusScrapeTarget +from opslib.osm.pod import ( + ContainerV3Builder, + IngressResourceV3Builder, + PodSpecV3Builder, +) +from opslib.osm.validator import ModelValidator, validator -from pod_spec import make_pod_spec logger = logging.getLogger(__name__) -KAFKA_EXPORTER_PORT = 9308 - +PORT = 9308 -class RelationsMissing(Exception): - def __init__(self, missing_relations: List): - self.message = "" - if missing_relations and isinstance(missing_relations, list): - self.message += f'Waiting for {", ".join(missing_relations)} relation' - if "," in self.message: - self.message += "s" +class ConfigModel(ModelValidator): + site_url: Optional[str] + cluster_issuer: Optional[str] + ingress_whitelist_source_range: Optional[str] + tls_secret_name: Optional[str] -class RelationDefinition: - def __init__(self, relation_name: str, keys: List, source_type): - if source_type != Application and source_type != Unit: - raise TypeError( - "source_type should be ops.model.Application or ops.model.Unit" - ) - self.relation_name = relation_name - self.keys = keys - self.source_type = source_type - - -def check_missing_relation_data( - data: Dict, - expected_relations_data: List[RelationDefinition], -): - missing_relations = [] - for relation_data in expected_relations_data: - if not all( - f"{relation_data.relation_name}_{k}" in data for k in relation_data.keys - ): - missing_relations.append(relation_data.relation_name) - if missing_relations: - raise RelationsMissing(missing_relations) - - -def get_relation_data( - charm: CharmBase, - relation_data: RelationDefinition, -) -> Dict: - data = {} - relation = charm.model.get_relation(relation_data.relation_name) - if relation: - self_app_unit = ( - charm.app if relation_data.source_type == Application else charm.unit - ) - expected_type = relation_data.source_type - for 
app_unit in relation.data: - if app_unit != self_app_unit and isinstance(app_unit, expected_type): - if all(k in relation.data[app_unit] for k in relation_data.keys): - for k in relation_data.keys: - data[f"{relation_data.relation_name}_{k}"] = relation.data[ - app_unit - ].get(k) - break - return data - + @validator("site_url") + def validate_site_url(cls, v): + if v: + parsed = urlparse(v) + if not parsed.scheme.startswith("http"): + raise ValueError("value must start with http") + return v -class KafkaExporterCharm(CharmBase): - """Kafka Exporter Charm.""" + @validator("ingress_whitelist_source_range") + def validate_ingress_whitelist_source_range(cls, v): + if v: + ip_network(v) + return v - state = StoredState() +class KafkaExporterCharm(CharmedOsmBase): def __init__(self, *args) -> NoReturn: - """Kafka Exporter Charm constructor.""" - super().__init__(*args) - - # Internal state initialization - self.state.set_default(pod_spec=None) - - self.port = KAFKA_EXPORTER_PORT - self.image = OCIImageResource(self, "image") - - # Registering regular events - self.framework.observe(self.on.start, self.configure_pod) - self.framework.observe(self.on.config_changed, self.configure_pod) + super().__init__(*args, oci_image="image") - # Registering required relation events - self.framework.observe(self.on.kafka_relation_changed, self.configure_pod) + # Provision Kafka relation to exchange information + self.kafka_client = KafkaClient(self, "kafka") + self.framework.observe(self.on["kafka"].relation_changed, self.configure_pod) + self.framework.observe(self.on["kafka"].relation_broken, self.configure_pod) - # Registering required relation departed events - self.framework.observe(self.on.kafka_relation_departed, self.configure_pod) - - # Registering provided relation events + # Register relation to provide a Scraping Target + self.scrape_target = PrometheusScrapeTarget(self, "prometheus-scrape") self.framework.observe( - self.on.prometheus_scrape_relation_joined, self._publish_scrape_info + self.on["prometheus-scrape"].relation_joined, self._publish_scrape_info ) + + # Register relation to provide a Dasboard Target + self.dashboard_target = GrafanaDashboardTarget(self, "grafana-dashboard") self.framework.observe( - self.on.grafana_dashboard_relation_joined, self._publish_dashboard_info + self.on["grafana-dashboard"].relation_joined, self._publish_dashboard_info ) - def _publish_scrape_info(self, event: EventBase) -> NoReturn: - """Publishes scrape information. + def _publish_scrape_info(self, event) -> NoReturn: + """Publishes scraping information for Prometheus. + + Args: + event (EventBase): Prometheus relation event. + """ + if self.unit.is_leader(): + hostname = ( + urlparse(self.model.config["site_url"]).hostname + if self.model.config["site_url"] + else self.model.app.name + ) + port = str(PORT) + if self.model.config.get("site_url", "").startswith("https://"): + port = "443" + elif self.model.config.get("site_url", "").startswith("http://"): + port = "80" + + self.scrape_target.publish_info( + hostname=hostname, + port=port, + metrics_path="/metrics", + scrape_interval="30s", + scrape_timeout="15s", + ) + + def _publish_dashboard_info(self, event) -> NoReturn: + """Publish dashboards for Grafana. Args: - event (EventBase): Exporter relation event. + event (EventBase): Grafana relation event. 
""" - rel_data = { - "hostname": urlparse(self.model.config["site_url"]).hostname - if self.model.config["site_url"] - else self.model.app.name, - "port": "80" if self.model.config["site_url"] else str(KAFKA_EXPORTER_PORT), - "metrics_path": "/metrics", - "scrape_interval": "30s", - "scrape_timeout": "15s", - } - for k, v in rel_data.items(): - event.relation.data[self.unit][k] = v - - def _publish_dashboard_info(self, event: EventBase) -> NoReturn: - """Publishes dashboard information. + if self.unit.is_leader(): + self.dashboard_target.publish_info( + name="osm-kafka", + dashboard=Path("files/kafka_exporter_dashboard.json").read_text(), + ) + + def _check_missing_dependencies(self, config: ConfigModel): + """Check if there is any relation missing. Args: - event (EventBase): Exporter relation event. + config (ConfigModel): object with configuration information. + + Raises: + RelationsMissing: if kafka is missing. """ - rel_data = { - "name": "osm-kafka", - "dashboard": Path("files/kafka_exporter_dashboard.json").read_text(), - } - for k, v in rel_data.items(): - event.relation.data[self.unit][k] = v - - @property - def relations_requirements(self): - return [RelationDefinition("kafka", ["host", "port"], Unit)] - - def get_relation_state(self): - relation_state = {} - for relation_requirements in self.relations_requirements: - data = get_relation_data(self, relation_requirements) - relation_state = {**relation_state, **data} - check_missing_relation_data(relation_state, self.relations_requirements) - return relation_state - - def configure_pod(self, _=None) -> NoReturn: - """Assemble the pod spec and apply it, if possible. + missing_relations = [] + + if self.kafka_client.is_missing_data_in_unit(): + missing_relations.append("kafka") + + if missing_relations: + raise RelationsMissing(missing_relations) + + def build_pod_spec(self, image_info): + """Build the PodSpec to be used. Args: - event (EventBase): Hook or Relation event that started the - function. + image_info (str): container image information. + + Returns: + Dict: PodSpec information. 
""" - if not self.unit.is_leader(): - self.unit.status = ActiveStatus("ready") - return - - relation_state = None - try: - relation_state = self.get_relation_state() - except RelationsMissing as exc: - logger.exception("Relation missing error") - self.unit.status = BlockedStatus(exc.message) - return - - self.unit.status = MaintenanceStatus("Assembling pod spec") - - # Fetch image information - try: - self.unit.status = MaintenanceStatus("Fetching image information") - image_info = self.image.fetch() - except OCIImageResourceError: - self.unit.status = BlockedStatus("Error fetching image information") - return - - try: - pod_spec = make_pod_spec( - image_info, - self.model.config, - relation_state, - self.model.app.name, - self.port, + # Validate config + config = ConfigModel(**dict(self.config)) + + # Check relations + self._check_missing_dependencies(config) + + # Create Builder for the PodSpec + pod_spec_builder = PodSpecV3Builder() + + # Build container + container_builder = ContainerV3Builder(self.app.name, image_info) + container_builder.add_port(name=self.app.name, port=PORT) + container_builder.add_http_readiness_probe( + path="/api/health", + port=PORT, + initial_delay_seconds=10, + period_seconds=10, + timeout_seconds=5, + success_threshold=1, + failure_threshold=3, + ) + container_builder.add_http_liveness_probe( + path="/api/health", + port=PORT, + initial_delay_seconds=60, + timeout_seconds=30, + failure_threshold=10, + ) + container_builder.add_command( + [ + "kafka_exporter", + f"--kafka.server={self.kafka_client.host}:{self.kafka_client.port}", + ] + ) + container = container_builder.build() + + # Add container to PodSpec + pod_spec_builder.add_container(container) + + # Add ingress resources to PodSpec if site url exists + if config.site_url: + parsed = urlparse(config.site_url) + annotations = {} + ingress_resource_builder = IngressResourceV3Builder( + f"{self.app.name}-ingress", annotations ) - except ValueError as exc: - logger.exception("Config/Relation data validation error") - self.unit.status = BlockedStatus(str(exc)) - return - if self.state.pod_spec != pod_spec: - self.model.pod.set_spec(pod_spec) - self.state.pod_spec = pod_spec + if config.ingress_whitelist_source_range: + annotations[ + "nginx.ingress.kubernetes.io/whitelist-source-range" + ] = config.ingress_whitelist_source_range + + if config.cluster_issuer: + annotations["cert-manager.io/cluster-issuer"] = config.cluster_issuer + + if parsed.scheme == "https": + ingress_resource_builder.add_tls( + [parsed.hostname], config.tls_secret_name + ) + else: + annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false" + + ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT) + ingress_resource = ingress_resource_builder.build() + pod_spec_builder.add_ingress_resource(ingress_resource) + + logger.debug(pod_spec_builder.build()) - self.unit.status = ActiveStatus("ready") + return pod_spec_builder.build() if __name__ == "__main__": diff --git a/installers/charm/kafka-exporter/src/pod_spec.py b/installers/charm/kafka-exporter/src/pod_spec.py index 90886cb2..214d6529 100644 --- a/installers/charm/kafka-exporter/src/pod_spec.py +++ b/installers/charm/kafka-exporter/src/pod_spec.py @@ -20,8 +20,8 @@ # osm-charmers@lists.launchpad.net ## -import logging from ipaddress import ip_network +import logging from typing import Any, Dict, List from urllib.parse import urlparse diff --git a/installers/charm/kafka-exporter/tests/__init__.py b/installers/charm/kafka-exporter/tests/__init__.py index 
4fd849a5..90dc417c 100644 --- a/installers/charm/kafka-exporter/tests/__init__.py +++ b/installers/charm/kafka-exporter/tests/__init__.py @@ -23,9 +23,17 @@ """Init mocking for unit tests.""" import sys + import mock + +class OCIImageResourceErrorMock(Exception): + pass + + sys.path.append("src") oci_image = mock.MagicMock() +oci_image.OCIImageResourceError = OCIImageResourceErrorMock sys.modules["oci_image"] = oci_image +sys.modules["oci_image"].OCIImageResource().fetch.return_value = {} diff --git a/installers/charm/kafka-exporter/tests/test_charm.py b/installers/charm/kafka-exporter/tests/test_charm.py index fc50b499..3f266fe9 100644 --- a/installers/charm/kafka-exporter/tests/test_charm.py +++ b/installers/charm/kafka-exporter/tests/test_charm.py @@ -20,13 +20,14 @@ # osm-charmers@lists.launchpad.net ## +import sys from typing import NoReturn import unittest -from ops.model import BlockedStatus -from ops.testing import Harness from charm import KafkaExporterCharm +from ops.model import ActiveStatus, BlockedStatus +from ops.testing import Harness class TestCharm(unittest.TestCase): @@ -34,455 +35,520 @@ class TestCharm(unittest.TestCase): def setUp(self) -> NoReturn: """Test setup""" + self.image_info = sys.modules["oci_image"].OCIImageResource().fetch() self.harness = Harness(KafkaExporterCharm) self.harness.set_leader(is_leader=True) self.harness.begin() + self.config = { + "ingress_whitelist_source_range": "", + "tls_secret_name": "", + "site_url": "https://kafka-exporter.192.168.100.100.nip.io", + "cluster_issuer": "vault-issuer", + } + self.harness.update_config(self.config) - def test_on_start_without_relations(self) -> NoReturn: - """Test installation without any relation.""" - self.harness.charm.on.start.emit() + def test_config_changed_no_relations( + self, + ) -> NoReturn: + """Test ingress resources without HTTP.""" - # Verifying status - self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) + self.harness.charm.on.config_changed.emit() - # Verifying status message - self.assertGreater(len(self.harness.charm.unit.status.message), 0) + # Assertions + self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) + print(self.harness.charm.unit.status.message) self.assertTrue( - self.harness.charm.unit.status.message.startswith("Waiting for ") + all( + relation in self.harness.charm.unit.status.message + for relation in ["kafka"] + ) ) - self.assertIn("kafka", self.harness.charm.unit.status.message) - self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation")) - def test_on_start_with_relations_without_http(self) -> NoReturn: - """Test deployment.""" - expected_result = { - "version": 3, - "containers": [ - { - "name": "kafka-exporter", - "imageDetails": self.harness.charm.image.fetch(), - "imagePullPolicy": "Always", - "ports": [ - { - "name": "kafka-exporter", - "containerPort": 9308, - "protocol": "TCP", - } - ], - "envConfig": {}, - "command": ["kafka_exporter", "--kafka.server=kafka:9090"], - "kubernetes": { - "readinessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9308, - }, - "initialDelaySeconds": 10, - "periodSeconds": 10, - "timeoutSeconds": 5, - "successThreshold": 1, - "failureThreshold": 3, - }, - "livenessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9308, - }, - "initialDelaySeconds": 60, - "timeoutSeconds": 30, - "failureThreshold": 10, - }, - }, - }, - ], - "kubernetesResources": {"ingressResources": []}, - } + def test_config_changed_non_leader( + self, + ) -> NoReturn: + """Test ingress 
resources without HTTP.""" + self.harness.set_leader(is_leader=False) + self.harness.charm.on.config_changed.emit() - self.harness.charm.on.start.emit() + # Assertions + self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) - # Initializing the kafka relation - relation_id = self.harness.add_relation("kafka", "kafka") - self.harness.add_relation_unit(relation_id, "kafka/0") - self.harness.update_relation_data( - relation_id, - "kafka/0", - { - "host": "kafka", - "port": "9090", - }, - ) + def test_with_relations( + self, + ) -> NoReturn: + "Test with relations" + self.initialize_kafka_relation() # Verifying status self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus) - pod_spec, _ = self.harness.get_pod_spec() - - self.assertDictEqual(expected_result, pod_spec) - - def test_ingress_resources_with_http(self) -> NoReturn: - """Test ingress resources with HTTP.""" - expected_result = { - "version": 3, - "containers": [ - { - "name": "kafka-exporter", - "imageDetails": self.harness.charm.image.fetch(), - "imagePullPolicy": "Always", - "ports": [ - { - "name": "kafka-exporter", - "containerPort": 9308, - "protocol": "TCP", - } - ], - "envConfig": {}, - "command": ["kafka_exporter", "--kafka.server=kafka:9090"], - "kubernetes": { - "readinessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9308, - }, - "initialDelaySeconds": 10, - "periodSeconds": 10, - "timeoutSeconds": 5, - "successThreshold": 1, - "failureThreshold": 3, - }, - "livenessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9308, - }, - "initialDelaySeconds": 60, - "timeoutSeconds": 30, - "failureThreshold": 10, - }, - }, - }, - ], - "kubernetesResources": { - "ingressResources": [ - { - "name": "kafka-exporter-ingress", - "annotations": { - "nginx.ingress.kubernetes.io/ssl-redirect": "false", - }, - "spec": { - "rules": [ - { - "host": "kafka-exporter", - "http": { - "paths": [ - { - "path": "/", - "backend": { - "serviceName": "kafka-exporter", - "servicePort": 9308, - }, - } - ] - }, - } - ] - }, - } - ], - }, - } - - self.harness.charm.on.start.emit() - - # Initializing the kafka relation - relation_id = self.harness.add_relation("kafka", "kafka") - self.harness.add_relation_unit(relation_id, "kafka/0") + def initialize_kafka_relation(self): + kafka_relation_id = self.harness.add_relation("kafka", "kafka") + self.harness.add_relation_unit(kafka_relation_id, "kafka/0") self.harness.update_relation_data( - relation_id, - "kafka/0", - { - "host": "kafka", - "port": "9090", - }, + kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092} ) - self.harness.update_config({"site_url": "http://kafka-exporter"}) - - pod_spec, _ = self.harness.get_pod_spec() - - self.assertDictEqual(expected_result, pod_spec) - - def test_ingress_resources_with_https(self) -> NoReturn: - """Test ingress resources with HTTPS.""" - expected_result = { - "version": 3, - "containers": [ - { - "name": "kafka-exporter", - "imageDetails": self.harness.charm.image.fetch(), - "imagePullPolicy": "Always", - "ports": [ - { - "name": "kafka-exporter", - "containerPort": 9308, - "protocol": "TCP", - } - ], - "envConfig": {}, - "command": ["kafka_exporter", "--kafka.server=kafka:9090"], - "kubernetes": { - "readinessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9308, - }, - "initialDelaySeconds": 10, - "periodSeconds": 10, - "timeoutSeconds": 5, - "successThreshold": 1, - "failureThreshold": 3, - }, - "livenessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9308, - }, - 
"initialDelaySeconds": 60, - "timeoutSeconds": 30, - "failureThreshold": 10, - }, - }, - }, - ], - "kubernetesResources": { - "ingressResources": [ - { - "name": "kafka-exporter-ingress", - "annotations": {}, - "spec": { - "rules": [ - { - "host": "kafka-exporter", - "http": { - "paths": [ - { - "path": "/", - "backend": { - "serviceName": "kafka-exporter", - "servicePort": 9308, - }, - } - ] - }, - } - ], - "tls": [ - { - "hosts": ["kafka-exporter"], - "secretName": "kafka-exporter", - } - ], - }, - } - ], - }, - } - - self.harness.charm.on.start.emit() - - # Initializing the kafka relation - relation_id = self.harness.add_relation("kafka", "kafka") - self.harness.add_relation_unit(relation_id, "kafka/0") - self.harness.update_relation_data( - relation_id, - "kafka/0", - { - "host": "kafka", - "port": "9090", - }, - ) - - self.harness.update_config( - { - "site_url": "https://kafka-exporter", - "tls_secret_name": "kafka-exporter", - } - ) - - pod_spec, _ = self.harness.get_pod_spec() - - self.assertDictEqual(expected_result, pod_spec) - - def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn: - """Test ingress resources with HTTPS and ingress whitelist.""" - expected_result = { - "version": 3, - "containers": [ - { - "name": "kafka-exporter", - "imageDetails": self.harness.charm.image.fetch(), - "imagePullPolicy": "Always", - "ports": [ - { - "name": "kafka-exporter", - "containerPort": 9308, - "protocol": "TCP", - } - ], - "envConfig": {}, - "command": ["kafka_exporter", "--kafka.server=kafka:9090"], - "kubernetes": { - "readinessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9308, - }, - "initialDelaySeconds": 10, - "periodSeconds": 10, - "timeoutSeconds": 5, - "successThreshold": 1, - "failureThreshold": 3, - }, - "livenessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9308, - }, - "initialDelaySeconds": 60, - "timeoutSeconds": 30, - "failureThreshold": 10, - }, - }, - }, - ], - "kubernetesResources": { - "ingressResources": [ - { - "name": "kafka-exporter-ingress", - "annotations": { - "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0", - }, - "spec": { - "rules": [ - { - "host": "kafka-exporter", - "http": { - "paths": [ - { - "path": "/", - "backend": { - "serviceName": "kafka-exporter", - "servicePort": 9308, - }, - } - ] - }, - } - ], - "tls": [ - { - "hosts": ["kafka-exporter"], - "secretName": "kafka-exporter", - } - ], - }, - } - ], - }, - } - - self.harness.charm.on.start.emit() - - # Initializing the kafka relation - relation_id = self.harness.add_relation("kafka", "kafka") - self.harness.add_relation_unit(relation_id, "kafka/0") - self.harness.update_relation_data( - relation_id, - "kafka/0", - { - "host": "kafka", - "port": "9090", - }, - ) - - self.harness.update_config( - { - "site_url": "https://kafka-exporter", - "tls_secret_name": "kafka-exporter", - "ingress_whitelist_source_range": "0.0.0.0/0", - } - ) - - pod_spec, _ = self.harness.get_pod_spec() - - self.assertDictEqual(expected_result, pod_spec) - - def test_on_kafka_unit_relation_changed(self) -> NoReturn: - """Test to see if kafka relation is updated.""" - self.harness.charm.on.start.emit() - - relation_id = self.harness.add_relation("kafka", "kafka") - self.harness.add_relation_unit(relation_id, "kafka/0") - self.harness.update_relation_data( - relation_id, - "kafka/0", - { - "host": "kafka", - "port": "9090", - }, - ) - - # Verifying status - self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus) - - def 
test_publish_target_info(self) -> NoReturn: - """Test to see if target relation is updated.""" - expected_result = { - "hostname": "kafka-exporter", - "port": "9308", - "metrics_path": "/metrics", - "scrape_interval": "30s", - "scrape_timeout": "15s", - } - - self.harness.charm.on.start.emit() - - relation_id = self.harness.add_relation("prometheus-scrape", "prometheus") - self.harness.add_relation_unit(relation_id, "prometheus/0") - relation_data = self.harness.get_relation_data(relation_id, "kafka-exporter/0") - - self.assertDictEqual(expected_result, relation_data) - - def test_publish_target_info_with_site_url(self) -> NoReturn: - """Test to see if target relation is updated.""" - expected_result = { - "hostname": "kafka-exporter-osm", - "port": "80", - "metrics_path": "/metrics", - "scrape_interval": "30s", - "scrape_timeout": "15s", - } - - self.harness.charm.on.start.emit() - - self.harness.update_config({"site_url": "http://kafka-exporter-osm"}) - - relation_id = self.harness.add_relation("prometheus-scrape", "prometheus") - self.harness.add_relation_unit(relation_id, "prometheus/0") - relation_data = self.harness.get_relation_data(relation_id, "kafka-exporter/0") - - self.assertDictEqual(expected_result, relation_data) - - def test_publish_dashboard_info(self) -> NoReturn: - """Test to see if dashboard relation is updated.""" - self.harness.charm.on.start.emit() - - relation_id = self.harness.add_relation("grafana-dashboard", "grafana") - self.harness.add_relation_unit(relation_id, "grafana/0") - relation_data = self.harness.get_relation_data(relation_id, "kafka-exporter/0") - - self.assertTrue("dashboard" in relation_data) - self.assertTrue(len(relation_data["dashboard"]) > 0) - if __name__ == "__main__": unittest.main() + + +# class TestCharm(unittest.TestCase): +# """Kafka Exporter Charm unit tests.""" +# +# def setUp(self) -> NoReturn: +# """Test setup""" +# self.harness = Harness(KafkaExporterCharm) +# self.harness.set_leader(is_leader=True) +# self.harness.begin() +# +# def test_on_start_without_relations(self) -> NoReturn: +# """Test installation without any relation.""" +# self.harness.charm.on.start.emit() +# +# # Verifying status +# self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) +# +# # Verifying status message +# self.assertGreater(len(self.harness.charm.unit.status.message), 0) +# self.assertTrue( +# self.harness.charm.unit.status.message.startswith("Waiting for ") +# ) +# self.assertIn("kafka", self.harness.charm.unit.status.message) +# self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation")) +# +# def test_on_start_with_relations_without_http(self) -> NoReturn: +# """Test deployment.""" +# expected_result = { +# "version": 3, +# "containers": [ +# { +# "name": "kafka-exporter", +# "imageDetails": self.harness.charm.image.fetch(), +# "imagePullPolicy": "Always", +# "ports": [ +# { +# "name": "kafka-exporter", +# "containerPort": 9308, +# "protocol": "TCP", +# } +# ], +# "envConfig": {}, +# "command": ["kafka_exporter", "--kafka.server=kafka:9090"], +# "kubernetes": { +# "readinessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9308, +# }, +# "initialDelaySeconds": 10, +# "periodSeconds": 10, +# "timeoutSeconds": 5, +# "successThreshold": 1, +# "failureThreshold": 3, +# }, +# "livenessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9308, +# }, +# "initialDelaySeconds": 60, +# "timeoutSeconds": 30, +# "failureThreshold": 10, +# }, +# }, +# }, +# ], +# "kubernetesResources": {"ingressResources": 
[]}, +# } +# +# self.harness.charm.on.start.emit() +# +# # Initializing the kafka relation +# relation_id = self.harness.add_relation("kafka", "kafka") +# self.harness.add_relation_unit(relation_id, "kafka/0") +# self.harness.update_relation_data( +# relation_id, +# "kafka/0", +# { +# "host": "kafka", +# "port": "9090", +# }, +# ) +# +# # Verifying status +# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus) +# +# pod_spec, _ = self.harness.get_pod_spec() +# +# self.assertDictEqual(expected_result, pod_spec) +# +# def test_ingress_resources_with_http(self) -> NoReturn: +# """Test ingress resources with HTTP.""" +# expected_result = { +# "version": 3, +# "containers": [ +# { +# "name": "kafka-exporter", +# "imageDetails": self.harness.charm.image.fetch(), +# "imagePullPolicy": "Always", +# "ports": [ +# { +# "name": "kafka-exporter", +# "containerPort": 9308, +# "protocol": "TCP", +# } +# ], +# "envConfig": {}, +# "command": ["kafka_exporter", "--kafka.server=kafka:9090"], +# "kubernetes": { +# "readinessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9308, +# }, +# "initialDelaySeconds": 10, +# "periodSeconds": 10, +# "timeoutSeconds": 5, +# "successThreshold": 1, +# "failureThreshold": 3, +# }, +# "livenessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9308, +# }, +# "initialDelaySeconds": 60, +# "timeoutSeconds": 30, +# "failureThreshold": 10, +# }, +# }, +# }, +# ], +# "kubernetesResources": { +# "ingressResources": [ +# { +# "name": "kafka-exporter-ingress", +# "annotations": { +# "nginx.ingress.kubernetes.io/ssl-redirect": "false", +# }, +# "spec": { +# "rules": [ +# { +# "host": "kafka-exporter", +# "http": { +# "paths": [ +# { +# "path": "/", +# "backend": { +# "serviceName": "kafka-exporter", +# "servicePort": 9308, +# }, +# } +# ] +# }, +# } +# ] +# }, +# } +# ], +# }, +# } +# +# self.harness.charm.on.start.emit() +# +# # Initializing the kafka relation +# relation_id = self.harness.add_relation("kafka", "kafka") +# self.harness.add_relation_unit(relation_id, "kafka/0") +# self.harness.update_relation_data( +# relation_id, +# "kafka/0", +# { +# "host": "kafka", +# "port": "9090", +# }, +# ) +# +# self.harness.update_config({"site_url": "http://kafka-exporter"}) +# +# pod_spec, _ = self.harness.get_pod_spec() +# +# self.assertDictEqual(expected_result, pod_spec) +# +# def test_ingress_resources_with_https(self) -> NoReturn: +# """Test ingress resources with HTTPS.""" +# expected_result = { +# "version": 3, +# "containers": [ +# { +# "name": "kafka-exporter", +# "imageDetails": self.harness.charm.image.fetch(), +# "imagePullPolicy": "Always", +# "ports": [ +# { +# "name": "kafka-exporter", +# "containerPort": 9308, +# "protocol": "TCP", +# } +# ], +# "envConfig": {}, +# "command": ["kafka_exporter", "--kafka.server=kafka:9090"], +# "kubernetes": { +# "readinessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9308, +# }, +# "initialDelaySeconds": 10, +# "periodSeconds": 10, +# "timeoutSeconds": 5, +# "successThreshold": 1, +# "failureThreshold": 3, +# }, +# "livenessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9308, +# }, +# "initialDelaySeconds": 60, +# "timeoutSeconds": 30, +# "failureThreshold": 10, +# }, +# }, +# }, +# ], +# "kubernetesResources": { +# "ingressResources": [ +# { +# "name": "kafka-exporter-ingress", +# "annotations": {}, +# "spec": { +# "rules": [ +# { +# "host": "kafka-exporter", +# "http": { +# "paths": [ +# { +# "path": "/", +# "backend": { +# "serviceName": "kafka-exporter", +# 
"servicePort": 9308, +# }, +# } +# ] +# }, +# } +# ], +# "tls": [ +# { +# "hosts": ["kafka-exporter"], +# "secretName": "kafka-exporter", +# } +# ], +# }, +# } +# ], +# }, +# } +# +# self.harness.charm.on.start.emit() +# +# # Initializing the kafka relation +# relation_id = self.harness.add_relation("kafka", "kafka") +# self.harness.add_relation_unit(relation_id, "kafka/0") +# self.harness.update_relation_data( +# relation_id, +# "kafka/0", +# { +# "host": "kafka", +# "port": "9090", +# }, +# ) +# +# self.harness.update_config( +# { +# "site_url": "https://kafka-exporter", +# "tls_secret_name": "kafka-exporter", +# } +# ) +# +# pod_spec, _ = self.harness.get_pod_spec() +# +# self.assertDictEqual(expected_result, pod_spec) +# +# def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn: +# """Test ingress resources with HTTPS and ingress whitelist.""" +# expected_result = { +# "version": 3, +# "containers": [ +# { +# "name": "kafka-exporter", +# "imageDetails": self.harness.charm.image.fetch(), +# "imagePullPolicy": "Always", +# "ports": [ +# { +# "name": "kafka-exporter", +# "containerPort": 9308, +# "protocol": "TCP", +# } +# ], +# "envConfig": {}, +# "command": ["kafka_exporter", "--kafka.server=kafka:9090"], +# "kubernetes": { +# "readinessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9308, +# }, +# "initialDelaySeconds": 10, +# "periodSeconds": 10, +# "timeoutSeconds": 5, +# "successThreshold": 1, +# "failureThreshold": 3, +# }, +# "livenessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9308, +# }, +# "initialDelaySeconds": 60, +# "timeoutSeconds": 30, +# "failureThreshold": 10, +# }, +# }, +# }, +# ], +# "kubernetesResources": { +# "ingressResources": [ +# { +# "name": "kafka-exporter-ingress", +# "annotations": { +# "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0", +# }, +# "spec": { +# "rules": [ +# { +# "host": "kafka-exporter", +# "http": { +# "paths": [ +# { +# "path": "/", +# "backend": { +# "serviceName": "kafka-exporter", +# "servicePort": 9308, +# }, +# } +# ] +# }, +# } +# ], +# "tls": [ +# { +# "hosts": ["kafka-exporter"], +# "secretName": "kafka-exporter", +# } +# ], +# }, +# } +# ], +# }, +# } +# +# self.harness.charm.on.start.emit() +# +# # Initializing the kafka relation +# relation_id = self.harness.add_relation("kafka", "kafka") +# self.harness.add_relation_unit(relation_id, "kafka/0") +# self.harness.update_relation_data( +# relation_id, +# "kafka/0", +# { +# "host": "kafka", +# "port": "9090", +# }, +# ) +# +# self.harness.update_config( +# { +# "site_url": "https://kafka-exporter", +# "tls_secret_name": "kafka-exporter", +# "ingress_whitelist_source_range": "0.0.0.0/0", +# } +# ) +# +# pod_spec, _ = self.harness.get_pod_spec() +# +# self.assertDictEqual(expected_result, pod_spec) +# +# def test_on_kafka_unit_relation_changed(self) -> NoReturn: +# """Test to see if kafka relation is updated.""" +# self.harness.charm.on.start.emit() +# +# relation_id = self.harness.add_relation("kafka", "kafka") +# self.harness.add_relation_unit(relation_id, "kafka/0") +# self.harness.update_relation_data( +# relation_id, +# "kafka/0", +# { +# "host": "kafka", +# "port": "9090", +# }, +# ) +# +# # Verifying status +# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus) +# +# def test_publish_target_info(self) -> NoReturn: +# """Test to see if target relation is updated.""" +# expected_result = { +# "hostname": "kafka-exporter", +# "port": "9308", +# "metrics_path": "/metrics", +# 
"scrape_interval": "30s", +# "scrape_timeout": "15s", +# } +# +# self.harness.charm.on.start.emit() +# +# relation_id = self.harness.add_relation("prometheus-scrape", "prometheus") +# self.harness.add_relation_unit(relation_id, "prometheus/0") +# relation_data = self.harness.get_relation_data(relation_id, "kafka-exporter/0") +# +# self.assertDictEqual(expected_result, relation_data) +# +# def test_publish_target_info_with_site_url(self) -> NoReturn: +# """Test to see if target relation is updated.""" +# expected_result = { +# "hostname": "kafka-exporter-osm", +# "port": "80", +# "metrics_path": "/metrics", +# "scrape_interval": "30s", +# "scrape_timeout": "15s", +# } +# +# self.harness.charm.on.start.emit() +# +# self.harness.update_config({"site_url": "http://kafka-exporter-osm"}) +# +# relation_id = self.harness.add_relation("prometheus-scrape", "prometheus") +# self.harness.add_relation_unit(relation_id, "prometheus/0") +# relation_data = self.harness.get_relation_data(relation_id, "kafka-exporter/0") +# +# self.assertDictEqual(expected_result, relation_data) +# +# def test_publish_dashboard_info(self) -> NoReturn: +# """Test to see if dashboard relation is updated.""" +# self.harness.charm.on.start.emit() +# +# relation_id = self.harness.add_relation("grafana-dashboard", "grafana") +# self.harness.add_relation_unit(relation_id, "grafana/0") +# relation_data = self.harness.get_relation_data(relation_id, "kafka-exporter/0") +# +# self.assertTrue("dashboard" in relation_data) +# self.assertTrue(len(relation_data["dashboard"]) > 0) +# +# +# if __name__ == "__main__": +# unittest.main() diff --git a/installers/charm/kafka-exporter/tox.ini b/installers/charm/kafka-exporter/tox.ini index a6dfd317..f207ac34 100644 --- a/installers/charm/kafka-exporter/tox.ini +++ b/installers/charm/kafka-exporter/tox.ini @@ -18,64 +18,107 @@ # To get in touch with the maintainers, please contact: # osm-charmers@lists.launchpad.net ## +####################################################################################### [tox] -skipsdist = True -envlist = unit, lint -sitepackages = False -skip_missing_interpreters = False +envlist = black, cover, flake8, pylint, yamllint, safety +skipsdist = true + +[tox:jenkins] +toxworkdir = /tmp/.tox [testenv] -basepython = python3 +basepython = python3.8 +setenv = VIRTUAL_ENV={envdir} + PYTHONDONTWRITEBYTECODE = 1 +deps = -r{toxinidir}/requirements.txt + + +####################################################################################### +[testenv:black] +deps = black +commands = + black --check --diff src/ tests/ + + +####################################################################################### +[testenv:cover] +deps = {[testenv]deps} + -r{toxinidir}/requirements-test.txt + coverage + nose2 +commands = + sh -c 'rm -f nosetests.xml' + coverage erase + nose2 -C --coverage src + coverage report --omit='*tests*' + coverage html -d ./cover --omit='*tests*' + coverage xml -o coverage.xml --omit=*tests* +whitelist_externals = sh + + +####################################################################################### +[testenv:flake8] +deps = flake8 + flake8-import-order +commands = + flake8 src/ tests/ + + +####################################################################################### +[testenv:pylint] +deps = {[testenv]deps} + -r{toxinidir}/requirements-test.txt + pylint +commands = + pylint -E src/ tests/ + + +####################################################################################### +[testenv:safety] setenv = - PYTHONHASHSEED=0 - 
PYTHONPATH = {toxinidir}/src - CHARM_NAME = kafka-exporter + LC_ALL=C.UTF-8 + LANG=C.UTF-8 +deps = {[testenv]deps} + safety +commands = + - safety check --full-report + +####################################################################################### +[testenv:yamllint] +deps = {[testenv]deps} + -r{toxinidir}/requirements-test.txt + yamllint +commands = yamllint . + +####################################################################################### [testenv:build] passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY +deps = charmcraft whitelist_externals = charmcraft - rm - unzip + cp commands = - rm -rf release kafka-exporter.charm charmcraft build - unzip kafka-exporter.charm -d release + cp -r build release -[testenv:unit] -commands = - coverage erase - stestr run --slowest --test-path=./tests --top-dir=./ - coverage combine - coverage html -d cover - coverage xml -o cover/coverage.xml - coverage report -deps = - coverage - stestr - mock - ops -setenv = - {[testenv]setenv} - PYTHON=coverage run - -[testenv:lint] -deps = - black - yamllint - flake8 -commands = - black --check --diff . --exclude "build/|.tox/|mod/|lib/" - yamllint . - flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/ .tox/ mod/ lib/" - -[coverage:run] -branch = True -concurrency = multiprocessing -parallel = True -source = - . -omit = - .tox/* - tests/* +####################################################################################### +[flake8] +ignore = + W291, + W293, + W503, + E123, + E125, + E226, + E241, +exclude = + .git, + __pycache__, + .tox, +max-line-length = 120 +show-source = True +builtins = _ +max-complexity = 10 +import-order-style = google diff --git a/installers/charm/mongodb-exporter/.gitignore b/installers/charm/mongodb-exporter/.gitignore index a4d0de20..2885df27 100644 --- a/installers/charm/mongodb-exporter/.gitignore +++ b/installers/charm/mongodb-exporter/.gitignore @@ -22,7 +22,9 @@ venv .vscode build -mongodb-exporter.charm +*.charm .coverage +coverage.xml .stestr cover +release \ No newline at end of file diff --git a/installers/charm/mongodb-exporter/.jujuignore b/installers/charm/mongodb-exporter/.jujuignore new file mode 100644 index 00000000..3ae3e7dc --- /dev/null +++ b/installers/charm/mongodb-exporter/.jujuignore @@ -0,0 +1,34 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: legal@canonical.com +# +# To get in touch with the maintainers, please contact: +# osm-charmers@lists.launchpad.net +## + +venv +.vscode +build +*.charm +.coverage +coverage.xml +.gitignore +.stestr +cover +release +tests/ +requirements* +tox.ini diff --git a/installers/charm/mongodb-exporter/.yamllint.yaml b/installers/charm/mongodb-exporter/.yamllint.yaml index f300159a..d71fb69f 100644 --- a/installers/charm/mongodb-exporter/.yamllint.yaml +++ b/installers/charm/mongodb-exporter/.yamllint.yaml @@ -28,4 +28,7 @@ yaml-files: - ".yamllint" ignore: | .tox + cover/ build/ + venv + release/ diff --git a/installers/charm/mongodb-exporter/config.yaml b/installers/charm/mongodb-exporter/config.yaml index 8d3703e4..206bca55 100644 --- a/installers/charm/mongodb-exporter/config.yaml +++ b/installers/charm/mongodb-exporter/config.yaml @@ -41,3 +41,6 @@ options: type: string description: Name of the cluster issuer for TLS certificates default: "" + mongodb_uri: + type: string + description: MongoDB URI (external database) diff --git a/installers/charm/mongodb-exporter/requirements-test.txt b/installers/charm/mongodb-exporter/requirements-test.txt new file mode 100644 index 00000000..316f6d20 --- /dev/null +++ b/installers/charm/mongodb-exporter/requirements-test.txt @@ -0,0 +1,21 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: legal@canonical.com +# +# To get in touch with the maintainers, please contact: +# osm-charmers@lists.launchpad.net + +mock==4.0.3 diff --git a/installers/charm/mongodb-exporter/requirements.txt b/installers/charm/mongodb-exporter/requirements.txt index 884cf9f9..8bb93ad3 100644 --- a/installers/charm/mongodb-exporter/requirements.txt +++ b/installers/charm/mongodb-exporter/requirements.txt @@ -19,5 +19,4 @@ # osm-charmers@lists.launchpad.net ## -ops -git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image +git+https://github.com/charmed-osm/ops-lib-charmed-osm/@master diff --git a/installers/charm/mongodb-exporter/src/charm.py b/installers/charm/mongodb-exporter/src/charm.py index 02a600c8..fd318faf 100755 --- a/installers/charm/mongodb-exporter/src/charm.py +++ b/installers/charm/mongodb-exporter/src/charm.py @@ -20,205 +20,227 @@ # osm-charmers@lists.launchpad.net ## +# pylint: disable=E0213 + +from ipaddress import ip_network import logging from pathlib import Path -from typing import Dict, List, NoReturn +from typing import NoReturn, Optional from urllib.parse import urlparse -from ops.charm import CharmBase -from ops.framework import EventBase, StoredState from ops.main import main -from ops.model import ActiveStatus, Application, BlockedStatus, MaintenanceStatus, Unit -from oci_image import OCIImageResource, OCIImageResourceError +from opslib.osm.charm import CharmedOsmBase, RelationsMissing +from opslib.osm.interfaces.grafana import GrafanaDashboardTarget +from opslib.osm.interfaces.mongo import MongoClient +from opslib.osm.interfaces.prometheus import PrometheusScrapeTarget +from opslib.osm.pod import ( + ContainerV3Builder, + IngressResourceV3Builder, + PodSpecV3Builder, +) +from opslib.osm.validator import ModelValidator, validator -from pod_spec import make_pod_spec logger = logging.getLogger(__name__) -MONGODB_EXPORTER_PORT = 9216 +PORT = 9216 -class RelationsMissing(Exception): - def __init__(self, missing_relations: List): - self.message = "" - if missing_relations and isinstance(missing_relations, list): - self.message += f'Waiting for {", ".join(missing_relations)} relation' - if "," in self.message: - self.message += "s" +class ConfigModel(ModelValidator): + site_url: Optional[str] + cluster_issuer: Optional[str] + ingress_whitelist_source_range: Optional[str] + tls_secret_name: Optional[str] + mongodb_uri: Optional[str] + @validator("site_url") + def validate_site_url(cls, v): + if v: + parsed = urlparse(v) + if not parsed.scheme.startswith("http"): + raise ValueError("value must start with http") + return v -class RelationDefinition: - def __init__(self, relation_name: str, keys: List, source_type): - if source_type != Application and source_type != Unit: - raise TypeError( - "source_type should be ops.model.Application or ops.model.Unit" - ) - self.relation_name = relation_name - self.keys = keys - self.source_type = source_type - - -def check_missing_relation_data( - data: Dict, - expected_relations_data: List[RelationDefinition], -): - missing_relations = [] - for relation_data in expected_relations_data: - if not all( - f"{relation_data.relation_name}_{k}" in data for k in relation_data.keys - ): - missing_relations.append(relation_data.relation_name) - if missing_relations: - raise RelationsMissing(missing_relations) - - -def get_relation_data( - charm: CharmBase, - relation_data: RelationDefinition, -) -> Dict: - data = 
{}
-    relation = charm.model.get_relation(relation_data.relation_name)
-    if relation:
-        self_app_unit = (
-            charm.app if relation_data.source_type == Application else charm.unit
-        )
-        expected_type = relation_data.source_type
-        for app_unit in relation.data:
-            if app_unit != self_app_unit and isinstance(app_unit, expected_type):
-                if all(k in relation.data[app_unit] for k in relation_data.keys):
-                    for k in relation_data.keys:
-                        data[f"{relation_data.relation_name}_{k}"] = relation.data[
-                            app_unit
-                        ].get(k)
-                    break
-    return data
+    @validator("ingress_whitelist_source_range")
+    def validate_ingress_whitelist_source_range(cls, v):
+        if v:
+            ip_network(v)
+        return v
+    @validator("mongodb_uri")
+    def validate_mongodb_uri(cls, v):
+        if v and not v.startswith("mongodb://"):
+            raise ValueError("mongodb_uri is not properly formed")
+        return v
-class MongodbExporterCharm(CharmBase):
-    """Mongodb Exporter Charm."""
-
-    state = StoredState()
+class MongodbExporterCharm(CharmedOsmBase):
     def __init__(self, *args) -> NoReturn:
-        """Mongodb Exporter Charm constructor."""
-        super().__init__(*args)
-
-        # Internal state initialization
-        self.state.set_default(pod_spec=None)
+        super().__init__(*args, oci_image="image")
-        self.port = MONGODB_EXPORTER_PORT
-        self.image = OCIImageResource(self, "image")
+        # Provision MongoDB relation to exchange information
+        self.mongodb_client = MongoClient(self, "mongodb")
+        self.framework.observe(self.on["mongodb"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["mongodb"].relation_broken, self.configure_pod)
-        # Registering regular events
-        self.framework.observe(self.on.start, self.configure_pod)
-        self.framework.observe(self.on.config_changed, self.configure_pod)
-
-        # Registering required relation events
-        self.framework.observe(self.on.mongodb_relation_changed, self.configure_pod)
-
-        # Registering required relation departed events
-        self.framework.observe(self.on.mongodb_relation_departed, self.configure_pod)
-
-        # Registering provided relation events
+        # Register relation to provide a Scraping Target
+        self.scrape_target = PrometheusScrapeTarget(self, "prometheus-scrape")
         self.framework.observe(
-            self.on.prometheus_scrape_relation_joined, self._publish_scrape_info
+            self.on["prometheus-scrape"].relation_joined, self._publish_scrape_info
         )
+
+        # Register relation to provide a Dashboard Target
+        self.dashboard_target = GrafanaDashboardTarget(self, "grafana-dashboard")
         self.framework.observe(
-            self.on.grafana_dashboard_relation_joined, self._publish_dashboard_info
+            self.on["grafana-dashboard"].relation_joined, self._publish_dashboard_info
         )
-    def _publish_scrape_info(self, event: EventBase) -> NoReturn:
-        """Publishes scrape information.
+    def _publish_scrape_info(self, event) -> NoReturn:
+        """Publishes scraping information for Prometheus.
         Args:
-            event (EventBase): Exporter relation event.
+            event (EventBase): Prometheus relation event.
         """
-        rel_data = {
-            "hostname": urlparse(self.model.config["site_url"]).hostname
-            if self.model.config["site_url"]
-            else self.model.app.name,
-            "port": "80"
-            if self.model.config["site_url"]
-            else str(MONGODB_EXPORTER_PORT),
-            "metrics_path": "/metrics",
-            "scrape_interval": "30s",
-            "scrape_timeout": "15s",
-        }
-        for k, v in rel_data.items():
-            event.relation.data[self.unit][k] = v
-
-    def _publish_dashboard_info(self, event: EventBase) -> NoReturn:
-        """Publishes dashboard information.
+        if self.unit.is_leader():
+            hostname = (
+                urlparse(self.model.config["site_url"]).hostname
+                if self.model.config["site_url"]
+                else self.model.app.name
+            )
+            port = str(PORT)
+            if self.model.config.get("site_url", "").startswith("https://"):
+                port = "443"
+            elif self.model.config.get("site_url", "").startswith("http://"):
+                port = "80"
+
+            self.scrape_target.publish_info(
+                hostname=hostname,
+                port=port,
+                metrics_path="/metrics",
+                scrape_interval="30s",
+                scrape_timeout="15s",
+            )
+
+    def _publish_dashboard_info(self, event) -> NoReturn:
+        """Publish dashboards for Grafana.
         Args:
-            event (EventBase): Exporter relation event.
+            event (EventBase): Grafana relation event.
         """
-        rel_data = {
-            "name": "osm-mongodb",
-            "dashboard": Path("files/mongodb_exporter_dashboard.json").read_text(),
-        }
-        for k, v in rel_data.items():
-            event.relation.data[self.unit][k] = v
-
-    @property
-    def relations_requirements(self):
-        return [RelationDefinition("mongodb", ["connection_string"], Unit)]
-
-    def get_relation_state(self):
-        relation_state = {}
-        for relation_requirements in self.relations_requirements:
-            data = get_relation_data(self, relation_requirements)
-            relation_state = {**relation_state, **data}
-        check_missing_relation_data(relation_state, self.relations_requirements)
-        return relation_state
-
-    def configure_pod(self, _=None) -> NoReturn:
-        """Assemble the pod spec and apply it, if possible.
+        if self.unit.is_leader():
+            self.dashboard_target.publish_info(
+                name="osm-mongodb",
+                dashboard=Path("files/mongodb_exporter_dashboard.json").read_text(),
+            )
+
+    def _check_missing_dependencies(self, config: ConfigModel):
+        """Check if there is any relation missing.
         Args:
-            event (EventBase): Hook or Relation event that started the
-                function.
+            config (ConfigModel): object with configuration information.
+
+        Raises:
+            RelationsMissing: if mongodb is missing.
         """
-        if not self.unit.is_leader():
-            self.unit.status = ActiveStatus("ready")
-            return
-
-        relation_state = None
-        try:
-            relation_state = self.get_relation_state()
-        except RelationsMissing as exc:
-            logger.exception("Relation missing error")
-            self.unit.status = BlockedStatus(exc.message)
-            return
-
-        self.unit.status = MaintenanceStatus("Assembling pod spec")
-
-        # Fetch image information
-        try:
-            self.unit.status = MaintenanceStatus("Fetching image information")
-            image_info = self.image.fetch()
-        except OCIImageResourceError:
-            self.unit.status = BlockedStatus("Error fetching image information")
-            return
-
-        try:
-            pod_spec = make_pod_spec(
-                image_info,
-                self.model.config,
-                relation_state,
-                self.model.app.name,
-                self.port,
+        missing_relations = []
+
+        if not config.mongodb_uri and self.mongodb_client.is_missing_data_in_unit():
+            missing_relations.append("mongodb")
+
+        if missing_relations:
+            raise RelationsMissing(missing_relations)
+
+    def build_pod_spec(self, image_info):
+        """Build the PodSpec to be used.
+
+        Args:
+            image_info (str): container image information.
+
+        Returns:
+            Dict: PodSpec information.
+ """ + # Validate config + config = ConfigModel(**dict(self.config)) + + if config.mongodb_uri and not self.mongodb_client.is_missing_data_in_unit(): + raise Exception("Mongodb data cannot be provided via config and relation") + + # Check relations + self._check_missing_dependencies(config) + + # Create Builder for the PodSpec + pod_spec_builder = PodSpecV3Builder() + + # Build container + container_builder = ContainerV3Builder(self.app.name, image_info) + container_builder.add_port(name=self.app.name, port=PORT) + container_builder.add_http_readiness_probe( + path="/api/health", + port=PORT, + initial_delay_seconds=10, + period_seconds=10, + timeout_seconds=5, + success_threshold=1, + failure_threshold=3, + ) + container_builder.add_http_liveness_probe( + path="/api/health", + port=PORT, + initial_delay_seconds=60, + timeout_seconds=30, + failure_threshold=10, + ) + + unparsed = ( + config.mongodb_uri + if config.mongodb_uri + else self.mongodb_client.connection_string + ) + parsed = urlparse(unparsed) + mongodb_uri = f"mongodb://{parsed.netloc.split(',')[0]}{parsed.path}" + if parsed.query: + mongodb_uri += f"?{parsed.query}" + + container_builder.add_envs( + { + "MONGODB_URI": mongodb_uri, + } + ) + container = container_builder.build() + + # Add container to PodSpec + pod_spec_builder.add_container(container) + + # Add ingress resources to PodSpec if site url exists + if config.site_url: + parsed = urlparse(config.site_url) + annotations = {} + ingress_resource_builder = IngressResourceV3Builder( + f"{self.app.name}-ingress", annotations ) - except ValueError as exc: - logger.exception("Config/Relation data validation error") - self.unit.status = BlockedStatus(str(exc)) - return - if self.state.pod_spec != pod_spec: - self.model.pod.set_spec(pod_spec) - self.state.pod_spec = pod_spec + if config.ingress_whitelist_source_range: + annotations[ + "nginx.ingress.kubernetes.io/whitelist-source-range" + ] = config.ingress_whitelist_source_range + + if config.cluster_issuer: + annotations["cert-manager.io/cluster-issuer"] = config.cluster_issuer + + if parsed.scheme == "https": + ingress_resource_builder.add_tls( + [parsed.hostname], config.tls_secret_name + ) + else: + annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false" + + ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT) + ingress_resource = ingress_resource_builder.build() + pod_spec_builder.add_ingress_resource(ingress_resource) + + logger.debug(pod_spec_builder.build()) - self.unit.status = ActiveStatus("ready") + return pod_spec_builder.build() if __name__ == "__main__": diff --git a/installers/charm/mongodb-exporter/src/pod_spec.py b/installers/charm/mongodb-exporter/src/pod_spec.py index 0cc3f8ca..ff42e02c 100644 --- a/installers/charm/mongodb-exporter/src/pod_spec.py +++ b/installers/charm/mongodb-exporter/src/pod_spec.py @@ -20,8 +20,8 @@ # osm-charmers@lists.launchpad.net ## -import logging from ipaddress import ip_network +import logging from typing import Any, Dict, List from urllib.parse import urlparse diff --git a/installers/charm/mongodb-exporter/tests/__init__.py b/installers/charm/mongodb-exporter/tests/__init__.py index 4fd849a5..90dc417c 100644 --- a/installers/charm/mongodb-exporter/tests/__init__.py +++ b/installers/charm/mongodb-exporter/tests/__init__.py @@ -23,9 +23,17 @@ """Init mocking for unit tests.""" import sys + import mock + +class OCIImageResourceErrorMock(Exception): + pass + + sys.path.append("src") oci_image = mock.MagicMock() +oci_image.OCIImageResourceError = 
OCIImageResourceErrorMock sys.modules["oci_image"] = oci_image +sys.modules["oci_image"].OCIImageResource().fetch.return_value = {} diff --git a/installers/charm/mongodb-exporter/tests/test_charm.py b/installers/charm/mongodb-exporter/tests/test_charm.py index 372886b4..1675f5f5 100644 --- a/installers/charm/mongodb-exporter/tests/test_charm.py +++ b/installers/charm/mongodb-exporter/tests/test_charm.py @@ -20,13 +20,13 @@ # osm-charmers@lists.launchpad.net ## +import sys from typing import NoReturn import unittest -from ops.model import BlockedStatus -from ops.testing import Harness - from charm import MongodbExporterCharm +from ops.model import ActiveStatus, BlockedStatus +from ops.testing import Harness class TestCharm(unittest.TestCase): @@ -34,462 +34,550 @@ class TestCharm(unittest.TestCase): def setUp(self) -> NoReturn: """Test setup""" + self.image_info = sys.modules["oci_image"].OCIImageResource().fetch() self.harness = Harness(MongodbExporterCharm) self.harness.set_leader(is_leader=True) self.harness.begin() + self.config = { + "ingress_whitelist_source_range": "", + "tls_secret_name": "", + "site_url": "https://mongodb-exporter.192.168.100.100.nip.io", + "cluster_issuer": "vault-issuer", + } + self.harness.update_config(self.config) - def test_on_start_without_relations(self) -> NoReturn: - """Test installation without any relation.""" - self.harness.charm.on.start.emit() + def test_config_changed_no_relations( + self, + ) -> NoReturn: + """Test ingress resources without HTTP.""" - # Verifying status - self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) + self.harness.charm.on.config_changed.emit() - # Verifying status message - self.assertGreater(len(self.harness.charm.unit.status.message), 0) + # Assertions + self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) + print(self.harness.charm.unit.status.message) self.assertTrue( - self.harness.charm.unit.status.message.startswith("Waiting for ") + all( + relation in self.harness.charm.unit.status.message + for relation in ["mongodb"] + ) ) - self.assertIn("mongodb", self.harness.charm.unit.status.message) - self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation")) - def test_on_start_with_relations_without_http(self) -> NoReturn: - """Test deployment.""" - expected_result = { - "version": 3, - "containers": [ - { - "name": "mongodb-exporter", - "imageDetails": self.harness.charm.image.fetch(), - "imagePullPolicy": "Always", - "ports": [ - { - "name": "mongo-exporter", - "containerPort": 9216, - "protocol": "TCP", - } - ], - "envConfig": { - "MONGODB_URI": "mongodb://mongo", - }, - "kubernetes": { - "readinessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9216, - }, - "initialDelaySeconds": 10, - "periodSeconds": 10, - "timeoutSeconds": 5, - "successThreshold": 1, - "failureThreshold": 3, - }, - "livenessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9216, - }, - "initialDelaySeconds": 60, - "timeoutSeconds": 30, - "failureThreshold": 10, - }, - }, - }, - ], - "kubernetesResources": {"ingressResources": []}, - } + def test_config_changed_non_leader( + self, + ) -> NoReturn: + """Test ingress resources without HTTP.""" + self.harness.set_leader(is_leader=False) + self.harness.charm.on.config_changed.emit() - self.harness.charm.on.start.emit() + # Assertions + self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) - # Initializing the mongodb relation - relation_id = self.harness.add_relation("mongodb", "mongodb") - 
self.harness.add_relation_unit(relation_id, "mongodb/0") - self.harness.update_relation_data( - relation_id, - "mongodb/0", - { - "connection_string": "mongodb://mongo", - }, - ) + def test_with_relations( + self, + ) -> NoReturn: + "Test with relations" + self.initialize_mongo_relation() # Verifying status self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus) - pod_spec, _ = self.harness.get_pod_spec() - - self.assertDictEqual(expected_result, pod_spec) - - def test_ingress_resources_with_http(self) -> NoReturn: - """Test ingress resources with HTTP.""" - expected_result = { - "version": 3, - "containers": [ - { - "name": "mongodb-exporter", - "imageDetails": self.harness.charm.image.fetch(), - "imagePullPolicy": "Always", - "ports": [ - { - "name": "mongo-exporter", - "containerPort": 9216, - "protocol": "TCP", - } - ], - "envConfig": { - "MONGODB_URI": "mongodb://mongo", - }, - "kubernetes": { - "readinessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9216, - }, - "initialDelaySeconds": 10, - "periodSeconds": 10, - "timeoutSeconds": 5, - "successThreshold": 1, - "failureThreshold": 3, - }, - "livenessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9216, - }, - "initialDelaySeconds": 60, - "timeoutSeconds": 30, - "failureThreshold": 10, - }, - }, - }, - ], - "kubernetesResources": { - "ingressResources": [ - { - "name": "mongodb-exporter-ingress", - "annotations": { - "nginx.ingress.kubernetes.io/ssl-redirect": "false", - }, - "spec": { - "rules": [ - { - "host": "mongodb-exporter", - "http": { - "paths": [ - { - "path": "/", - "backend": { - "serviceName": "mongodb-exporter", - "servicePort": 9216, - }, - } - ] - }, - } - ] - }, - } - ], - }, - } - - self.harness.charm.on.start.emit() - - # Initializing the mongodb relation - relation_id = self.harness.add_relation("mongodb", "mongodb") - self.harness.add_relation_unit(relation_id, "mongodb/0") - self.harness.update_relation_data( - relation_id, - "mongodb/0", - { - "connection_string": "mongodb://mongo", - }, - ) - - self.harness.update_config({"site_url": "http://mongodb-exporter"}) - - pod_spec, _ = self.harness.get_pod_spec() - - self.assertDictEqual(expected_result, pod_spec) - - def test_ingress_resources_with_https(self) -> NoReturn: - """Test ingress resources with HTTPS.""" - expected_result = { - "version": 3, - "containers": [ - { - "name": "mongodb-exporter", - "imageDetails": self.harness.charm.image.fetch(), - "imagePullPolicy": "Always", - "ports": [ - { - "name": "mongo-exporter", - "containerPort": 9216, - "protocol": "TCP", - } - ], - "envConfig": { - "MONGODB_URI": "mongodb://mongo", - }, - "kubernetes": { - "readinessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9216, - }, - "initialDelaySeconds": 10, - "periodSeconds": 10, - "timeoutSeconds": 5, - "successThreshold": 1, - "failureThreshold": 3, - }, - "livenessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9216, - }, - "initialDelaySeconds": 60, - "timeoutSeconds": 30, - "failureThreshold": 10, - }, - }, - }, - ], - "kubernetesResources": { - "ingressResources": [ - { - "name": "mongodb-exporter-ingress", - "annotations": {}, - "spec": { - "rules": [ - { - "host": "mongodb-exporter", - "http": { - "paths": [ - { - "path": "/", - "backend": { - "serviceName": "mongodb-exporter", - "servicePort": 9216, - }, - } - ] - }, - } - ], - "tls": [ - { - "hosts": ["mongodb-exporter"], - "secretName": "mongodb-exporter", - } - ], - }, - } - ], - }, - } - - self.harness.charm.on.start.emit() - - # Initializing 
the mongodb relation - relation_id = self.harness.add_relation("mongodb", "mongodb") - self.harness.add_relation_unit(relation_id, "mongodb/0") - self.harness.update_relation_data( - relation_id, - "mongodb/0", - { - "connection_string": "mongodb://mongo", - }, - ) - - self.harness.update_config( - { - "site_url": "https://mongodb-exporter", - "tls_secret_name": "mongodb-exporter", - } - ) - - pod_spec, _ = self.harness.get_pod_spec() - - self.assertDictEqual(expected_result, pod_spec) - - def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn: - """Test ingress resources with HTTPS and ingress whitelist.""" - expected_result = { - "version": 3, - "containers": [ - { - "name": "mongodb-exporter", - "imageDetails": self.harness.charm.image.fetch(), - "imagePullPolicy": "Always", - "ports": [ - { - "name": "mongo-exporter", - "containerPort": 9216, - "protocol": "TCP", - } - ], - "envConfig": { - "MONGODB_URI": "mongodb://mongo", - }, - "kubernetes": { - "readinessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9216, - }, - "initialDelaySeconds": 10, - "periodSeconds": 10, - "timeoutSeconds": 5, - "successThreshold": 1, - "failureThreshold": 3, - }, - "livenessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9216, - }, - "initialDelaySeconds": 60, - "timeoutSeconds": 30, - "failureThreshold": 10, - }, - }, - }, - ], - "kubernetesResources": { - "ingressResources": [ - { - "name": "mongodb-exporter-ingress", - "annotations": { - "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0", - }, - "spec": { - "rules": [ - { - "host": "mongodb-exporter", - "http": { - "paths": [ - { - "path": "/", - "backend": { - "serviceName": "mongodb-exporter", - "servicePort": 9216, - }, - } - ] - }, - } - ], - "tls": [ - { - "hosts": ["mongodb-exporter"], - "secretName": "mongodb-exporter", - } - ], - }, - } - ], - }, - } - - self.harness.charm.on.start.emit() - - # Initializing the mongodb relation - relation_id = self.harness.add_relation("mongodb", "mongodb") - self.harness.add_relation_unit(relation_id, "mongodb/0") - self.harness.update_relation_data( - relation_id, - "mongodb/0", - { - "connection_string": "mongodb://mongo", - }, - ) - - self.harness.update_config( - { - "site_url": "https://mongodb-exporter", - "tls_secret_name": "mongodb-exporter", - "ingress_whitelist_source_range": "0.0.0.0/0", - } - ) - - pod_spec, _ = self.harness.get_pod_spec() - - self.assertDictEqual(expected_result, pod_spec) - - def test_on_mongodb_unit_relation_changed(self) -> NoReturn: - """Test to see if mongodb relation is updated.""" - self.harness.charm.on.start.emit() - - # Initializing the mongodb relation - relation_id = self.harness.add_relation("mongodb", "mongodb") - self.harness.add_relation_unit(relation_id, "mongodb/0") - self.harness.update_relation_data( - relation_id, - "mongodb/0", - { - "connection_string": "mongodb://mongo", - }, - ) + def test_with_config( + self, + ) -> NoReturn: + "Test with config" + self.initialize_mongo_relation() # Verifying status self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus) - def test_publish_scrape_info(self) -> NoReturn: - """Test to see if scrape relation is updated.""" - expected_result = { - "hostname": "mongodb-exporter", - "port": "9216", - "metrics_path": "/metrics", - "scrape_interval": "30s", - "scrape_timeout": "15s", - } - - self.harness.charm.on.start.emit() - - relation_id = self.harness.add_relation("prometheus-scrape", "prometheus") - self.harness.add_relation_unit(relation_id, 
"prometheus/0") - relation_data = self.harness.get_relation_data( - relation_id, "mongodb-exporter/0" - ) - - self.assertDictEqual(expected_result, relation_data) - - def test_publish_scrape_info_with_site_url(self) -> NoReturn: - """Test to see if target relation is updated.""" - expected_result = { - "hostname": "mongodb-exporter-osm", - "port": "80", - "metrics_path": "/metrics", - "scrape_interval": "30s", - "scrape_timeout": "15s", - } - - self.harness.charm.on.start.emit() - - self.harness.update_config({"site_url": "http://mongodb-exporter-osm"}) - - relation_id = self.harness.add_relation("prometheus-scrape", "prometheus") - self.harness.add_relation_unit(relation_id, "prometheus/0") - relation_data = self.harness.get_relation_data( - relation_id, "mongodb-exporter/0" - ) + def test_mongodb_exception_relation_and_config( + self, + ) -> NoReturn: + self.initialize_mongo_config() + self.initialize_mongo_relation() - self.assertDictEqual(expected_result, relation_data) - - def test_publish_dashboard_info(self) -> NoReturn: - """Test to see if dashboard relation is updated.""" - self.harness.charm.on.start.emit() + # Verifying status + self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) - relation_id = self.harness.add_relation("grafana-dashboard", "grafana") - self.harness.add_relation_unit(relation_id, "grafana/0") - relation_data = self.harness.get_relation_data( - relation_id, "mongodb-exporter/0" + def initialize_mongo_relation(self): + mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb") + self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0") + self.harness.update_relation_data( + mongodb_relation_id, + "mongodb/0", + {"connection_string": "mongodb://mongo:27017"}, ) - self.assertEqual("osm-mongodb", relation_data["name"]) - self.assertTrue("dashboard" in relation_data) - self.assertTrue(len(relation_data["dashboard"]) > 0) + def initialize_mongo_config(self): + self.harness.update_config({"mongodb_uri": "mongodb://mongo:27017"}) if __name__ == "__main__": unittest.main() + + +# class TestCharm(unittest.TestCase): +# """Mongodb Exporter Charm unit tests.""" +# +# def setUp(self) -> NoReturn: +# """Test setup""" +# self.harness = Harness(MongodbExporterCharm) +# self.harness.set_leader(is_leader=True) +# self.harness.begin() +# +# def test_on_start_without_relations(self) -> NoReturn: +# """Test installation without any relation.""" +# self.harness.charm.on.start.emit() +# +# # Verifying status +# self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) +# +# # Verifying status message +# self.assertGreater(len(self.harness.charm.unit.status.message), 0) +# self.assertTrue( +# self.harness.charm.unit.status.message.startswith("Waiting for ") +# ) +# self.assertIn("mongodb", self.harness.charm.unit.status.message) +# self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation")) +# +# def test_on_start_with_relations_without_http(self) -> NoReturn: +# """Test deployment.""" +# expected_result = { +# "version": 3, +# "containers": [ +# { +# "name": "mongodb-exporter", +# "imageDetails": self.harness.charm.image.fetch(), +# "imagePullPolicy": "Always", +# "ports": [ +# { +# "name": "mongo-exporter", +# "containerPort": 9216, +# "protocol": "TCP", +# } +# ], +# "envConfig": { +# "MONGODB_URI": "mongodb://mongo", +# }, +# "kubernetes": { +# "readinessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9216, +# }, +# "initialDelaySeconds": 10, +# "periodSeconds": 10, +# "timeoutSeconds": 5, +# 
"successThreshold": 1, +# "failureThreshold": 3, +# }, +# "livenessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9216, +# }, +# "initialDelaySeconds": 60, +# "timeoutSeconds": 30, +# "failureThreshold": 10, +# }, +# }, +# }, +# ], +# "kubernetesResources": {"ingressResources": []}, +# } +# +# self.harness.charm.on.start.emit() +# +# # Initializing the mongodb relation +# relation_id = self.harness.add_relation("mongodb", "mongodb") +# self.harness.add_relation_unit(relation_id, "mongodb/0") +# self.harness.update_relation_data( +# relation_id, +# "mongodb/0", +# { +# "connection_string": "mongodb://mongo", +# }, +# ) +# +# # Verifying status +# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus) +# +# pod_spec, _ = self.harness.get_pod_spec() +# +# self.assertDictEqual(expected_result, pod_spec) +# +# def test_ingress_resources_with_http(self) -> NoReturn: +# """Test ingress resources with HTTP.""" +# expected_result = { +# "version": 3, +# "containers": [ +# { +# "name": "mongodb-exporter", +# "imageDetails": self.harness.charm.image.fetch(), +# "imagePullPolicy": "Always", +# "ports": [ +# { +# "name": "mongo-exporter", +# "containerPort": 9216, +# "protocol": "TCP", +# } +# ], +# "envConfig": { +# "MONGODB_URI": "mongodb://mongo", +# }, +# "kubernetes": { +# "readinessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9216, +# }, +# "initialDelaySeconds": 10, +# "periodSeconds": 10, +# "timeoutSeconds": 5, +# "successThreshold": 1, +# "failureThreshold": 3, +# }, +# "livenessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9216, +# }, +# "initialDelaySeconds": 60, +# "timeoutSeconds": 30, +# "failureThreshold": 10, +# }, +# }, +# }, +# ], +# "kubernetesResources": { +# "ingressResources": [ +# { +# "name": "mongodb-exporter-ingress", +# "annotations": { +# "nginx.ingress.kubernetes.io/ssl-redirect": "false", +# }, +# "spec": { +# "rules": [ +# { +# "host": "mongodb-exporter", +# "http": { +# "paths": [ +# { +# "path": "/", +# "backend": { +# "serviceName": "mongodb-exporter", +# "servicePort": 9216, +# }, +# } +# ] +# }, +# } +# ] +# }, +# } +# ], +# }, +# } +# +# self.harness.charm.on.start.emit() +# +# # Initializing the mongodb relation +# relation_id = self.harness.add_relation("mongodb", "mongodb") +# self.harness.add_relation_unit(relation_id, "mongodb/0") +# self.harness.update_relation_data( +# relation_id, +# "mongodb/0", +# { +# "connection_string": "mongodb://mongo", +# }, +# ) +# +# self.harness.update_config({"site_url": "http://mongodb-exporter"}) +# +# pod_spec, _ = self.harness.get_pod_spec() +# +# self.assertDictEqual(expected_result, pod_spec) +# +# def test_ingress_resources_with_https(self) -> NoReturn: +# """Test ingress resources with HTTPS.""" +# expected_result = { +# "version": 3, +# "containers": [ +# { +# "name": "mongodb-exporter", +# "imageDetails": self.harness.charm.image.fetch(), +# "imagePullPolicy": "Always", +# "ports": [ +# { +# "name": "mongo-exporter", +# "containerPort": 9216, +# "protocol": "TCP", +# } +# ], +# "envConfig": { +# "MONGODB_URI": "mongodb://mongo", +# }, +# "kubernetes": { +# "readinessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9216, +# }, +# "initialDelaySeconds": 10, +# "periodSeconds": 10, +# "timeoutSeconds": 5, +# "successThreshold": 1, +# "failureThreshold": 3, +# }, +# "livenessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9216, +# }, +# "initialDelaySeconds": 60, +# "timeoutSeconds": 30, +# "failureThreshold": 10, +# }, +# }, 
+# }, +# ], +# "kubernetesResources": { +# "ingressResources": [ +# { +# "name": "mongodb-exporter-ingress", +# "annotations": {}, +# "spec": { +# "rules": [ +# { +# "host": "mongodb-exporter", +# "http": { +# "paths": [ +# { +# "path": "/", +# "backend": { +# "serviceName": "mongodb-exporter", +# "servicePort": 9216, +# }, +# } +# ] +# }, +# } +# ], +# "tls": [ +# { +# "hosts": ["mongodb-exporter"], +# "secretName": "mongodb-exporter", +# } +# ], +# }, +# } +# ], +# }, +# } +# +# self.harness.charm.on.start.emit() +# +# # Initializing the mongodb relation +# relation_id = self.harness.add_relation("mongodb", "mongodb") +# self.harness.add_relation_unit(relation_id, "mongodb/0") +# self.harness.update_relation_data( +# relation_id, +# "mongodb/0", +# { +# "connection_string": "mongodb://mongo", +# }, +# ) +# +# self.harness.update_config( +# { +# "site_url": "https://mongodb-exporter", +# "tls_secret_name": "mongodb-exporter", +# } +# ) +# +# pod_spec, _ = self.harness.get_pod_spec() +# +# self.assertDictEqual(expected_result, pod_spec) +# +# def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn: +# """Test ingress resources with HTTPS and ingress whitelist.""" +# expected_result = { +# "version": 3, +# "containers": [ +# { +# "name": "mongodb-exporter", +# "imageDetails": self.harness.charm.image.fetch(), +# "imagePullPolicy": "Always", +# "ports": [ +# { +# "name": "mongo-exporter", +# "containerPort": 9216, +# "protocol": "TCP", +# } +# ], +# "envConfig": { +# "MONGODB_URI": "mongodb://mongo", +# }, +# "kubernetes": { +# "readinessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9216, +# }, +# "initialDelaySeconds": 10, +# "periodSeconds": 10, +# "timeoutSeconds": 5, +# "successThreshold": 1, +# "failureThreshold": 3, +# }, +# "livenessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9216, +# }, +# "initialDelaySeconds": 60, +# "timeoutSeconds": 30, +# "failureThreshold": 10, +# }, +# }, +# }, +# ], +# "kubernetesResources": { +# "ingressResources": [ +# { +# "name": "mongodb-exporter-ingress", +# "annotations": { +# "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0", +# }, +# "spec": { +# "rules": [ +# { +# "host": "mongodb-exporter", +# "http": { +# "paths": [ +# { +# "path": "/", +# "backend": { +# "serviceName": "mongodb-exporter", +# "servicePort": 9216, +# }, +# } +# ] +# }, +# } +# ], +# "tls": [ +# { +# "hosts": ["mongodb-exporter"], +# "secretName": "mongodb-exporter", +# } +# ], +# }, +# } +# ], +# }, +# } +# +# self.harness.charm.on.start.emit() +# +# # Initializing the mongodb relation +# relation_id = self.harness.add_relation("mongodb", "mongodb") +# self.harness.add_relation_unit(relation_id, "mongodb/0") +# self.harness.update_relation_data( +# relation_id, +# "mongodb/0", +# { +# "connection_string": "mongodb://mongo", +# }, +# ) +# +# self.harness.update_config( +# { +# "site_url": "https://mongodb-exporter", +# "tls_secret_name": "mongodb-exporter", +# "ingress_whitelist_source_range": "0.0.0.0/0", +# } +# ) +# +# pod_spec, _ = self.harness.get_pod_spec() +# +# self.assertDictEqual(expected_result, pod_spec) +# +# def test_on_mongodb_unit_relation_changed(self) -> NoReturn: +# """Test to see if mongodb relation is updated.""" +# self.harness.charm.on.start.emit() +# +# # Initializing the mongodb relation +# relation_id = self.harness.add_relation("mongodb", "mongodb") +# self.harness.add_relation_unit(relation_id, "mongodb/0") +# self.harness.update_relation_data( +# relation_id, +# "mongodb/0", +# { 
+# "connection_string": "mongodb://mongo", +# }, +# ) +# +# # Verifying status +# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus) +# +# def test_publish_scrape_info(self) -> NoReturn: +# """Test to see if scrape relation is updated.""" +# expected_result = { +# "hostname": "mongodb-exporter", +# "port": "9216", +# "metrics_path": "/metrics", +# "scrape_interval": "30s", +# "scrape_timeout": "15s", +# } +# +# self.harness.charm.on.start.emit() +# +# relation_id = self.harness.add_relation("prometheus-scrape", "prometheus") +# self.harness.add_relation_unit(relation_id, "prometheus/0") +# relation_data = self.harness.get_relation_data( +# relation_id, "mongodb-exporter/0" +# ) +# +# self.assertDictEqual(expected_result, relation_data) +# +# def test_publish_scrape_info_with_site_url(self) -> NoReturn: +# """Test to see if target relation is updated.""" +# expected_result = { +# "hostname": "mongodb-exporter-osm", +# "port": "80", +# "metrics_path": "/metrics", +# "scrape_interval": "30s", +# "scrape_timeout": "15s", +# } +# +# self.harness.charm.on.start.emit() +# +# self.harness.update_config({"site_url": "http://mongodb-exporter-osm"}) +# +# relation_id = self.harness.add_relation("prometheus-scrape", "prometheus") +# self.harness.add_relation_unit(relation_id, "prometheus/0") +# relation_data = self.harness.get_relation_data( +# relation_id, "mongodb-exporter/0" +# ) +# +# self.assertDictEqual(expected_result, relation_data) +# +# def test_publish_dashboard_info(self) -> NoReturn: +# """Test to see if dashboard relation is updated.""" +# self.harness.charm.on.start.emit() +# +# relation_id = self.harness.add_relation("grafana-dashboard", "grafana") +# self.harness.add_relation_unit(relation_id, "grafana/0") +# relation_data = self.harness.get_relation_data( +# relation_id, "mongodb-exporter/0" +# ) +# +# self.assertEqual("osm-mongodb", relation_data["name"]) +# self.assertTrue("dashboard" in relation_data) +# self.assertTrue(len(relation_data["dashboard"]) > 0) +# +# +# if __name__ == "__main__": +# unittest.main() diff --git a/installers/charm/mongodb-exporter/tox.ini b/installers/charm/mongodb-exporter/tox.ini index 69911728..f207ac34 100644 --- a/installers/charm/mongodb-exporter/tox.ini +++ b/installers/charm/mongodb-exporter/tox.ini @@ -18,64 +18,107 @@ # To get in touch with the maintainers, please contact: # osm-charmers@lists.launchpad.net ## +####################################################################################### [tox] -skipsdist = True -envlist = unit, lint -sitepackages = False -skip_missing_interpreters = False +envlist = black, cover, flake8, pylint, yamllint, safety +skipsdist = true + +[tox:jenkins] +toxworkdir = /tmp/.tox [testenv] -basepython = python3 +basepython = python3.8 +setenv = VIRTUAL_ENV={envdir} + PYTHONDONTWRITEBYTECODE = 1 +deps = -r{toxinidir}/requirements.txt + + +####################################################################################### +[testenv:black] +deps = black +commands = + black --check --diff src/ tests/ + + +####################################################################################### +[testenv:cover] +deps = {[testenv]deps} + -r{toxinidir}/requirements-test.txt + coverage + nose2 +commands = + sh -c 'rm -f nosetests.xml' + coverage erase + nose2 -C --coverage src + coverage report --omit='*tests*' + coverage html -d ./cover --omit='*tests*' + coverage xml -o coverage.xml --omit=*tests* +whitelist_externals = sh + + 
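A minimal standalone sketch of the MONGODB_URI rewrite that the new mongodb-exporter build_pod_spec performs: only the first host of the connection string is kept for the exporter, while the database path and query string are preserved (the helper name below is invented for illustration).

    from urllib.parse import urlparse

    def single_host_mongodb_uri(connection_string: str) -> str:
        """Keep only the first host of a MongoDB connection string."""
        parsed = urlparse(connection_string)
        uri = f"mongodb://{parsed.netloc.split(',')[0]}{parsed.path}"
        if parsed.query:
            uri += f"?{parsed.query}"
        return uri

    # "mongodb://mongo-0:27017,mongo-1:27017/db?replicaSet=rs"
    # becomes "mongodb://mongo-0:27017/db?replicaSet=rs"
    print(single_host_mongodb_uri("mongodb://mongo-0:27017,mongo-1:27017/db?replicaSet=rs"))
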
+####################################################################################### +[testenv:flake8] +deps = flake8 + flake8-import-order +commands = + flake8 src/ tests/ + + +####################################################################################### +[testenv:pylint] +deps = {[testenv]deps} + -r{toxinidir}/requirements-test.txt + pylint +commands = + pylint -E src/ tests/ + + +####################################################################################### +[testenv:safety] setenv = - PYTHONHASHSEED=0 - PYTHONPATH = {toxinidir}/src - CHARM_NAME = mongodb-exporter + LC_ALL=C.UTF-8 + LANG=C.UTF-8 +deps = {[testenv]deps} + safety +commands = + - safety check --full-report + +####################################################################################### +[testenv:yamllint] +deps = {[testenv]deps} + -r{toxinidir}/requirements-test.txt + yamllint +commands = yamllint . + +####################################################################################### [testenv:build] passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY +deps = charmcraft whitelist_externals = charmcraft - rm - unzip + cp commands = - rm -rf release mongodb-exporter.charm charmcraft build - unzip mongodb-exporter.charm -d release + cp -r build release -[testenv:unit] -commands = - coverage erase - stestr run --slowest --test-path=./tests --top-dir=./ - coverage combine - coverage html -d cover - coverage xml -o cover/coverage.xml - coverage report -deps = - coverage - stestr - mock - ops -setenv = - {[testenv]setenv} - PYTHON=coverage run - -[testenv:lint] -deps = - black - yamllint - flake8 -commands = - black --check --diff . --exclude "build/|.tox/|mod/|lib/" - yamllint . - flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/ .tox/ mod/ lib/" - -[coverage:run] -branch = True -concurrency = multiprocessing -parallel = True -source = - . -omit = - .tox/* - tests/* +####################################################################################### +[flake8] +ignore = + W291, + W293, + W503, + E123, + E125, + E226, + E241, +exclude = + .git, + __pycache__, + .tox, +max-line-length = 120 +show-source = True +builtins = _ +max-complexity = 10 +import-order-style = google diff --git a/installers/charm/mysqld-exporter/.gitignore b/installers/charm/mysqld-exporter/.gitignore index 3ca6f3ae..2885df27 100644 --- a/installers/charm/mysqld-exporter/.gitignore +++ b/installers/charm/mysqld-exporter/.gitignore @@ -22,7 +22,9 @@ venv .vscode build -grafana.charm +*.charm .coverage +coverage.xml .stestr cover +release \ No newline at end of file diff --git a/installers/charm/mysqld-exporter/.jujuignore b/installers/charm/mysqld-exporter/.jujuignore new file mode 100644 index 00000000..3ae3e7dc --- /dev/null +++ b/installers/charm/mysqld-exporter/.jujuignore @@ -0,0 +1,34 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: legal@canonical.com +# +# To get in touch with the maintainers, please contact: +# osm-charmers@lists.launchpad.net +## + +venv +.vscode +build +*.charm +.coverage +coverage.xml +.gitignore +.stestr +cover +release +tests/ +requirements* +tox.ini diff --git a/installers/charm/mysqld-exporter/.yamllint.yaml b/installers/charm/mysqld-exporter/.yamllint.yaml index f300159a..d71fb69f 100644 --- a/installers/charm/mysqld-exporter/.yamllint.yaml +++ b/installers/charm/mysqld-exporter/.yamllint.yaml @@ -28,4 +28,7 @@ yaml-files: - ".yamllint" ignore: | .tox + cover/ build/ + venv + release/ diff --git a/installers/charm/mysqld-exporter/config.yaml b/installers/charm/mysqld-exporter/config.yaml index 8d3703e4..f1192a15 100644 --- a/installers/charm/mysqld-exporter/config.yaml +++ b/installers/charm/mysqld-exporter/config.yaml @@ -41,3 +41,6 @@ options: type: string description: Name of the cluster issuer for TLS certificates default: "" + mysql_uri: + type: string + description: MySQL URI (external database) diff --git a/installers/charm/mysqld-exporter/requirements-test.txt b/installers/charm/mysqld-exporter/requirements-test.txt new file mode 100644 index 00000000..316f6d20 --- /dev/null +++ b/installers/charm/mysqld-exporter/requirements-test.txt @@ -0,0 +1,21 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: legal@canonical.com +# +# To get in touch with the maintainers, please contact: +# osm-charmers@lists.launchpad.net + +mock==4.0.3 diff --git a/installers/charm/mysqld-exporter/requirements.txt b/installers/charm/mysqld-exporter/requirements.txt index 884cf9f9..8bb93ad3 100644 --- a/installers/charm/mysqld-exporter/requirements.txt +++ b/installers/charm/mysqld-exporter/requirements.txt @@ -19,5 +19,4 @@ # osm-charmers@lists.launchpad.net ## -ops -git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image +git+https://github.com/charmed-osm/ops-lib-charmed-osm/@master diff --git a/installers/charm/mysqld-exporter/src/charm.py b/installers/charm/mysqld-exporter/src/charm.py index 1f42dc76..2ae7d837 100755 --- a/installers/charm/mysqld-exporter/src/charm.py +++ b/installers/charm/mysqld-exporter/src/charm.py @@ -20,210 +20,224 @@ # osm-charmers@lists.launchpad.net ## +# pylint: disable=E0213 + +from ipaddress import ip_network import logging from pathlib import Path -from typing import Dict, List, NoReturn +from typing import NoReturn, Optional from urllib.parse import urlparse -from ops.charm import CharmBase -from ops.framework import EventBase, StoredState from ops.main import main -from ops.model import ActiveStatus, Application, BlockedStatus, MaintenanceStatus, Unit -from oci_image import OCIImageResource, OCIImageResourceError +from opslib.osm.charm import CharmedOsmBase, RelationsMissing +from opslib.osm.interfaces.grafana import GrafanaDashboardTarget +from opslib.osm.interfaces.mysql import MysqlClient +from opslib.osm.interfaces.prometheus import PrometheusScrapeTarget +from opslib.osm.pod import ( + ContainerV3Builder, + IngressResourceV3Builder, + PodSpecV3Builder, +) +from opslib.osm.validator import ModelValidator, validator -from pod_spec import make_pod_spec logger = logging.getLogger(__name__) -MYSQLD_EXPORTER_PORT = 9104 - +PORT = 9104 -class RelationsMissing(Exception): - def __init__(self, missing_relations: List): - self.message = "" - if missing_relations and isinstance(missing_relations, list): - self.message += f'Waiting for {", ".join(missing_relations)} relation' - if "," in self.message: - self.message += "s" +class ConfigModel(ModelValidator): + site_url: Optional[str] + cluster_issuer: Optional[str] + ingress_whitelist_source_range: Optional[str] + tls_secret_name: Optional[str] + mysql_uri: Optional[str] -class RelationDefinition: - def __init__(self, relation_name: str, keys: List, source_type): - if source_type != Application and source_type != Unit: - raise TypeError( - "source_type should be ops.model.Application or ops.model.Unit" - ) - self.relation_name = relation_name - self.keys = keys - self.source_type = source_type - - -def check_missing_relation_data( - data: Dict, - expected_relations_data: List[RelationDefinition], -): - missing_relations = [] - for relation_data in expected_relations_data: - if not all( - f"{relation_data.relation_name}_{k}" in data for k in relation_data.keys - ): - missing_relations.append(relation_data.relation_name) - if missing_relations: - raise RelationsMissing(missing_relations) - - -def get_relation_data( - charm: CharmBase, - relation_data: RelationDefinition, -) -> Dict: - data = {} - relation = charm.model.get_relation(relation_data.relation_name) - if relation: - self_app_unit = ( - charm.app if relation_data.source_type == Application else charm.unit - ) - expected_type = 
relation_data.source_type
-        for app_unit in relation.data:
-            if app_unit != self_app_unit and isinstance(app_unit, expected_type):
-                if all(k in relation.data[app_unit] for k in relation_data.keys):
-                    for k in relation_data.keys:
-                        data[f"{relation_data.relation_name}_{k}"] = relation.data[
-                            app_unit
-                        ].get(k)
-                    break
-    return data
+    @validator("site_url")
+    def validate_site_url(cls, v):
+        if v:
+            parsed = urlparse(v)
+            if not parsed.scheme.startswith("http"):
+                raise ValueError("value must start with http")
+        return v
 
+    @validator("ingress_whitelist_source_range")
+    def validate_ingress_whitelist_source_range(cls, v):
+        if v:
+            ip_network(v)
+        return v
 
-class MysqldExporterCharm(CharmBase):
-    """Mysqld Exporter Charm."""
+    @validator("mysql_uri")
+    def validate_mysql_uri(cls, v):
+        if v and not v.startswith("mysql://"):
+            raise ValueError("mysql_uri is not properly formed")
+        return v
 
-    state = StoredState()
 
+class MysqlExporterCharm(CharmedOsmBase):
     def __init__(self, *args) -> NoReturn:
-        """Mysqld Exporter Charm constructor."""
-        super().__init__(*args)
-
-        # Internal state initialization
-        self.state.set_default(pod_spec=None)
-
-        self.port = MYSQLD_EXPORTER_PORT
-        self.image = OCIImageResource(self, "image")
-
-        # Registering regular events
-        self.framework.observe(self.on.start, self.configure_pod)
-        self.framework.observe(self.on.config_changed, self.configure_pod)
-
-        # Registering required relation events
-        self.framework.observe(self.on.mysql_relation_changed, self.configure_pod)
+        super().__init__(*args, oci_image="image")
 
-        # Registering required relation departed events
-        self.framework.observe(self.on.mysql_relation_departed, self.configure_pod)
+        # Provision MySQL relation to exchange information
+        self.mysql_client = MysqlClient(self, "mysql")
+        self.framework.observe(self.on["mysql"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["mysql"].relation_broken, self.configure_pod)
 
-        # Registering provided relation events
+        # Register relation to provide a Scraping Target
+        self.scrape_target = PrometheusScrapeTarget(self, "prometheus-scrape")
         self.framework.observe(
-            self.on.prometheus_scrape_relation_joined, self._publish_scrape_info
+            self.on["prometheus-scrape"].relation_joined, self._publish_scrape_info
         )
+
+        # Register relation to provide a Dashboard Target
+        self.dashboard_target = GrafanaDashboardTarget(self, "grafana-dashboard")
         self.framework.observe(
-            self.on.grafana_dashboard_relation_joined, self._publish_dashboard_info
+            self.on["grafana-dashboard"].relation_joined, self._publish_dashboard_info
         )
 
-    def _publish_scrape_info(self, event: EventBase) -> NoReturn:
-        """Publishes scrape information.
+    def _publish_scrape_info(self, event) -> NoReturn:
+        """Publishes scraping information for Prometheus.
 
         Args:
-            event (EventBase): Exporter relation event.
+            event (EventBase): Prometheus relation event.
         """
-        rel_data = {
-            "hostname": urlparse(self.model.config["site_url"]).hostname
-            if self.model.config["site_url"]
-            else self.model.app.name,
-            "port": "80"
-            if self.model.config["site_url"]
-            else str(MYSQLD_EXPORTER_PORT),
-            "metrics_path": "/metrics",
-            "scrape_interval": "30s",
-            "scrape_timeout": "15s",
-        }
-        for k, v in rel_data.items():
-            event.relation.data[self.unit][k] = v
-
-    def _publish_dashboard_info(self, event: EventBase) -> NoReturn:
-        """Publishes dashboard information.
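The ConfigModel above replaces the ad-hoc relation/config checks with declarative validators that run when ConfigModel(**dict(self.config)) is built. A minimal stdlib-only sketch of the same checks, written as plain functions with names invented for illustration:

    from ipaddress import ip_network
    from urllib.parse import urlparse

    def check_site_url(value: str) -> str:
        """Accept only http:// or https:// URLs; empty values pass through."""
        if value and not urlparse(value).scheme.startswith("http"):
            raise ValueError("value must start with http")
        return value

    def check_mysql_uri(value: str) -> str:
        """Accept only mysql:// URIs; empty values pass through."""
        if value and not value.startswith("mysql://"):
            raise ValueError("mysql_uri is not properly formed")
        return value

    def check_whitelist(value: str) -> str:
        """ip_network() raises ValueError for anything that is not a CIDR block."""
        if value:
            ip_network(value)
        return value

    check_site_url("https://mysql-exporter.example.com")   # ok
    check_mysql_uri("mysql://user:pass@mysql-host:3306")    # ok
    check_whitelist("0.0.0.0/0")                            # ok
    check_site_url("ftp://mysql-exporter.example.com")      # raises ValueError
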
+        if self.unit.is_leader():
+            hostname = (
+                urlparse(self.model.config["site_url"]).hostname
+                if self.model.config["site_url"]
+                else self.model.app.name
+            )
+            port = str(PORT)
+            if self.model.config.get("site_url", "").startswith("https://"):
+                port = "443"
+            elif self.model.config.get("site_url", "").startswith("http://"):
+                port = "80"
+
+            self.scrape_target.publish_info(
+                hostname=hostname,
+                port=port,
+                metrics_path="/metrics",
+                scrape_interval="30s",
+                scrape_timeout="15s",
+            )
+
+    def _publish_dashboard_info(self, event) -> NoReturn:
+        """Publish dashboards for Grafana.
 
         Args:
-            event (EventBase): Exporter relation event.
+            event (EventBase): Grafana relation event.
         """
-        rel_data = {
-            "name": "osm-mysql",
-            "dashboard": Path("files/mysql_exporter_dashboard.json").read_text(),
-        }
-        for k, v in rel_data.items():
-            event.relation.data[self.unit][k] = v
-
-    @property
-    def relations_requirements(self):
-        return [
-            RelationDefinition(
-                "mysql", ["host", "port", "user", "password", "root_password"], Unit
+        if self.unit.is_leader():
+            self.dashboard_target.publish_info(
+                name="osm-mysql",
+                dashboard=Path("files/mysql_exporter_dashboard.json").read_text(),
             )
-        ]
 
-    def get_relation_state(self):
-        relation_state = {}
-        for relation_requirements in self.relations_requirements:
-            data = get_relation_data(self, relation_requirements)
-            relation_state = {**relation_state, **data}
-        check_missing_relation_data(relation_state, self.relations_requirements)
-        return relation_state
+    def _check_missing_dependencies(self, config: ConfigModel):
+        """Check if there is any relation missing.
+
+        Args:
+            config (ConfigModel): object with configuration information.
+
+        Raises:
+            RelationsMissing: if mysql is missing.
+        """
+        missing_relations = []
 
-    def configure_pod(self, _=None) -> NoReturn:
-        """Assemble the pod spec and apply it, if possible.
+        if not config.mysql_uri and self.mysql_client.is_missing_data_in_unit():
+            missing_relations.append("mysql")
+
+        if missing_relations:
+            raise RelationsMissing(missing_relations)
+
+    def build_pod_spec(self, image_info):
+        """Build the PodSpec to be used.
 
         Args:
-            event (EventBase): Hook or Relation event that started the
-                function.
+            image_info (str): container image information.
+
+        Returns:
+            Dict: PodSpec information.
""" - if not self.unit.is_leader(): - self.unit.status = ActiveStatus("ready") - return - - relation_state = None - try: - relation_state = self.get_relation_state() - except RelationsMissing as exc: - logger.exception("Relation missing error") - self.unit.status = BlockedStatus(exc.message) - return - - self.unit.status = MaintenanceStatus("Assembling pod spec") - - # Fetch image information - try: - self.unit.status = MaintenanceStatus("Fetching image information") - image_info = self.image.fetch() - except OCIImageResourceError: - self.unit.status = BlockedStatus("Error fetching image information") - return - - try: - pod_spec = make_pod_spec( - image_info, - self.model.config, - relation_state, - self.model.app.name, - self.port, + # Validate config + config = ConfigModel(**dict(self.config)) + + if config.mysql_uri and not self.mysql_client.is_missing_data_in_unit(): + raise Exception("Mysql data cannot be provided via config and relation") + + # Check relations + self._check_missing_dependencies(config) + + # Create Builder for the PodSpec + pod_spec_builder = PodSpecV3Builder() + + # Build container + container_builder = ContainerV3Builder(self.app.name, image_info) + container_builder.add_port(name=self.app.name, port=PORT) + container_builder.add_http_readiness_probe( + path="/api/health", + port=PORT, + initial_delay_seconds=10, + period_seconds=10, + timeout_seconds=5, + success_threshold=1, + failure_threshold=3, + ) + container_builder.add_http_liveness_probe( + path="/api/health", + port=PORT, + initial_delay_seconds=60, + timeout_seconds=30, + failure_threshold=10, + ) + + data_source = ( + config.mysql_uri.replace("mysql://", "").split("/")[0] + if config.mysql_uri + else f"root:{self.mysql_client.root_password}@{self.mysql_client.host}:{self.mysql_client.port}" + ) + + container_builder.add_envs( + { + "DATA_SOURCE_NAME": data_source, + } + ) + container = container_builder.build() + + # Add container to PodSpec + pod_spec_builder.add_container(container) + + # Add ingress resources to PodSpec if site url exists + if config.site_url: + parsed = urlparse(config.site_url) + annotations = {} + ingress_resource_builder = IngressResourceV3Builder( + f"{self.app.name}-ingress", annotations ) - except ValueError as exc: - logger.exception("Config/Relation data validation error") - self.unit.status = BlockedStatus(str(exc)) - return - if self.state.pod_spec != pod_spec: - self.model.pod.set_spec(pod_spec) - self.state.pod_spec = pod_spec + if config.ingress_whitelist_source_range: + annotations[ + "nginx.ingress.kubernetes.io/whitelist-source-range" + ] = config.ingress_whitelist_source_range + + if config.cluster_issuer: + annotations["cert-manager.io/cluster-issuer"] = config.cluster_issuer + + if parsed.scheme == "https": + ingress_resource_builder.add_tls( + [parsed.hostname], config.tls_secret_name + ) + else: + annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false" + + ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT) + ingress_resource = ingress_resource_builder.build() + pod_spec_builder.add_ingress_resource(ingress_resource) + + logger.debug(pod_spec_builder.build()) - self.unit.status = ActiveStatus("ready") + return pod_spec_builder.build() if __name__ == "__main__": - main(MysqldExporterCharm) + main(MysqlExporterCharm) diff --git a/installers/charm/mysqld-exporter/src/pod_spec.py b/installers/charm/mysqld-exporter/src/pod_spec.py index e371030c..8068be7f 100644 --- a/installers/charm/mysqld-exporter/src/pod_spec.py +++ 
b/installers/charm/mysqld-exporter/src/pod_spec.py @@ -20,8 +20,8 @@ # osm-charmers@lists.launchpad.net ## -import logging from ipaddress import ip_network +import logging from typing import Any, Dict, List from urllib.parse import urlparse diff --git a/installers/charm/mysqld-exporter/tests/__init__.py b/installers/charm/mysqld-exporter/tests/__init__.py index 4fd849a5..90dc417c 100644 --- a/installers/charm/mysqld-exporter/tests/__init__.py +++ b/installers/charm/mysqld-exporter/tests/__init__.py @@ -23,9 +23,17 @@ """Init mocking for unit tests.""" import sys + import mock + +class OCIImageResourceErrorMock(Exception): + pass + + sys.path.append("src") oci_image = mock.MagicMock() +oci_image.OCIImageResourceError = OCIImageResourceErrorMock sys.modules["oci_image"] = oci_image +sys.modules["oci_image"].OCIImageResource().fetch.return_value = {} diff --git a/installers/charm/mysqld-exporter/tests/test_charm.py b/installers/charm/mysqld-exporter/tests/test_charm.py index 1d6a7e63..ddaacaf3 100644 --- a/installers/charm/mysqld-exporter/tests/test_charm.py +++ b/installers/charm/mysqld-exporter/tests/test_charm.py @@ -20,481 +20,576 @@ # osm-charmers@lists.launchpad.net ## +import sys from typing import NoReturn import unittest -from ops.model import BlockedStatus +from charm import MysqlExporterCharm +from ops.model import ActiveStatus, BlockedStatus from ops.testing import Harness -from charm import MysqldExporterCharm - class TestCharm(unittest.TestCase): """Mysql Exporter Charm unit tests.""" def setUp(self) -> NoReturn: """Test setup""" - self.harness = Harness(MysqldExporterCharm) + self.image_info = sys.modules["oci_image"].OCIImageResource().fetch() + self.harness = Harness(MysqlExporterCharm) self.harness.set_leader(is_leader=True) self.harness.begin() + self.config = { + "ingress_whitelist_source_range": "", + "tls_secret_name": "", + "site_url": "https://mysql-exporter.192.168.100.100.nip.io", + "cluster_issuer": "vault-issuer", + } + self.harness.update_config(self.config) - def test_on_start_without_relations(self) -> NoReturn: - """Test installation without any relation.""" - self.harness.charm.on.start.emit() + def test_config_changed_no_relations( + self, + ) -> NoReturn: + """Test ingress resources without HTTP.""" - # Verifying status - self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) + self.harness.charm.on.config_changed.emit() - # Verifying status message - self.assertGreater(len(self.harness.charm.unit.status.message), 0) + # Assertions + self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) + print(self.harness.charm.unit.status.message) self.assertTrue( - self.harness.charm.unit.status.message.startswith("Waiting for ") + all( + relation in self.harness.charm.unit.status.message + for relation in ["mysql"] + ) ) - self.assertIn("mysql", self.harness.charm.unit.status.message) - self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation")) - def test_on_start_with_relations_without_http(self) -> NoReturn: - """Test deployment.""" - expected_result = { - "version": 3, - "containers": [ - { - "name": "mysqld-exporter", - "imageDetails": self.harness.charm.image.fetch(), - "imagePullPolicy": "Always", - "ports": [ - { - "name": "mysqld-exporter", - "containerPort": 9104, - "protocol": "TCP", - } - ], - "envConfig": {"DATA_SOURCE_NAME": "root:rootpw@(mysql:3306)/"}, - "kubernetes": { - "readinessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9104, - }, - "initialDelaySeconds": 10, - "periodSeconds": 10, - 
"timeoutSeconds": 5, - "successThreshold": 1, - "failureThreshold": 3, - }, - "livenessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9104, - }, - "initialDelaySeconds": 60, - "timeoutSeconds": 30, - "failureThreshold": 10, - }, - }, - }, - ], - "kubernetesResources": {"ingressResources": []}, - } + def test_config_changed_non_leader( + self, + ) -> NoReturn: + """Test ingress resources without HTTP.""" + self.harness.set_leader(is_leader=False) + self.harness.charm.on.config_changed.emit() - self.harness.charm.on.start.emit() + # Assertions + self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus) - # Initializing the mysql relation - relation_id = self.harness.add_relation("mysql", "mysql") - self.harness.add_relation_unit(relation_id, "mysql/0") - self.harness.update_relation_data( - relation_id, - "mysql/0", - { - "host": "mysql", - "port": "3306", - "user": "mano", - "password": "manopw", - "root_password": "rootpw", - }, - ) + def test_with_relations( + self, + ) -> NoReturn: + "Test with relations" + self.initialize_mysql_relation() # Verifying status self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus) - pod_spec, _ = self.harness.get_pod_spec() - - self.assertDictEqual(expected_result, pod_spec) - - def test_ingress_resources_with_http(self) -> NoReturn: - """Test ingress resources with HTTP.""" - expected_result = { - "version": 3, - "containers": [ - { - "name": "mysqld-exporter", - "imageDetails": self.harness.charm.image.fetch(), - "imagePullPolicy": "Always", - "ports": [ - { - "name": "mysqld-exporter", - "containerPort": 9104, - "protocol": "TCP", - } - ], - "envConfig": {"DATA_SOURCE_NAME": "root:rootpw@(mysql:3306)/"}, - "kubernetes": { - "readinessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9104, - }, - "initialDelaySeconds": 10, - "periodSeconds": 10, - "timeoutSeconds": 5, - "successThreshold": 1, - "failureThreshold": 3, - }, - "livenessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9104, - }, - "initialDelaySeconds": 60, - "timeoutSeconds": 30, - "failureThreshold": 10, - }, - }, - }, - ], - "kubernetesResources": { - "ingressResources": [ - { - "name": "mysqld-exporter-ingress", - "annotations": { - "nginx.ingress.kubernetes.io/ssl-redirect": "false", - }, - "spec": { - "rules": [ - { - "host": "mysqld-exporter", - "http": { - "paths": [ - { - "path": "/", - "backend": { - "serviceName": "mysqld-exporter", - "servicePort": 9104, - }, - } - ] - }, - } - ] - }, - } - ], - }, - } - - self.harness.charm.on.start.emit() - - # Initializing the mysql relation - relation_id = self.harness.add_relation("mysql", "mysql") - self.harness.add_relation_unit(relation_id, "mysql/0") - self.harness.update_relation_data( - relation_id, - "mysql/0", - { - "host": "mysql", - "port": "3306", - "user": "mano", - "password": "manopw", - "root_password": "rootpw", - }, - ) - - self.harness.update_config({"site_url": "http://mysqld-exporter"}) - - pod_spec, _ = self.harness.get_pod_spec() - - self.assertDictEqual(expected_result, pod_spec) - - def test_ingress_resources_with_https(self) -> NoReturn: - """Test ingress resources with HTTPS.""" - expected_result = { - "version": 3, - "containers": [ - { - "name": "mysqld-exporter", - "imageDetails": self.harness.charm.image.fetch(), - "imagePullPolicy": "Always", - "ports": [ - { - "name": "mysqld-exporter", - "containerPort": 9104, - "protocol": "TCP", - } - ], - "envConfig": {"DATA_SOURCE_NAME": "root:rootpw@(mysql:3306)/"}, - "kubernetes": { - "readinessProbe": { - 
"httpGet": { - "path": "/api/health", - "port": 9104, - }, - "initialDelaySeconds": 10, - "periodSeconds": 10, - "timeoutSeconds": 5, - "successThreshold": 1, - "failureThreshold": 3, - }, - "livenessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9104, - }, - "initialDelaySeconds": 60, - "timeoutSeconds": 30, - "failureThreshold": 10, - }, - }, - }, - ], - "kubernetesResources": { - "ingressResources": [ - { - "name": "mysqld-exporter-ingress", - "annotations": {}, - "spec": { - "rules": [ - { - "host": "mysqld-exporter", - "http": { - "paths": [ - { - "path": "/", - "backend": { - "serviceName": "mysqld-exporter", - "servicePort": 9104, - }, - } - ] - }, - } - ], - "tls": [ - { - "hosts": ["mysqld-exporter"], - "secretName": "mysqld-exporter", - } - ], - }, - } - ], - }, - } - - self.harness.charm.on.start.emit() - - # Initializing the mysql relation - relation_id = self.harness.add_relation("mysql", "mysql") - self.harness.add_relation_unit(relation_id, "mysql/0") - self.harness.update_relation_data( - relation_id, - "mysql/0", - { - "host": "mysql", - "port": "3306", - "user": "mano", - "password": "manopw", - "root_password": "rootpw", - }, - ) - - self.harness.update_config( - { - "site_url": "https://mysqld-exporter", - "tls_secret_name": "mysqld-exporter", - } - ) - - pod_spec, _ = self.harness.get_pod_spec() - - self.assertDictEqual(expected_result, pod_spec) - - def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn: - """Test ingress resources with HTTPS and ingress whitelist.""" - expected_result = { - "version": 3, - "containers": [ - { - "name": "mysqld-exporter", - "imageDetails": self.harness.charm.image.fetch(), - "imagePullPolicy": "Always", - "ports": [ - { - "name": "mysqld-exporter", - "containerPort": 9104, - "protocol": "TCP", - } - ], - "envConfig": {"DATA_SOURCE_NAME": "root:rootpw@(mysql:3306)/"}, - "kubernetes": { - "readinessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9104, - }, - "initialDelaySeconds": 10, - "periodSeconds": 10, - "timeoutSeconds": 5, - "successThreshold": 1, - "failureThreshold": 3, - }, - "livenessProbe": { - "httpGet": { - "path": "/api/health", - "port": 9104, - }, - "initialDelaySeconds": 60, - "timeoutSeconds": 30, - "failureThreshold": 10, - }, - }, - }, - ], - "kubernetesResources": { - "ingressResources": [ - { - "name": "mysqld-exporter-ingress", - "annotations": { - "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0", - }, - "spec": { - "rules": [ - { - "host": "mysqld-exporter", - "http": { - "paths": [ - { - "path": "/", - "backend": { - "serviceName": "mysqld-exporter", - "servicePort": 9104, - }, - } - ] - }, - } - ], - "tls": [ - { - "hosts": ["mysqld-exporter"], - "secretName": "mysqld-exporter", - } - ], - }, - } - ], - }, - } - - self.harness.charm.on.start.emit() - - # Initializing the mysql relation - relation_id = self.harness.add_relation("mysql", "mysql") - self.harness.add_relation_unit(relation_id, "mysql/0") - self.harness.update_relation_data( - relation_id, - "mysql/0", - { - "host": "mysql", - "port": "3306", - "user": "mano", - "password": "manopw", - "root_password": "rootpw", - }, - ) - - self.harness.update_config( - { - "site_url": "https://mysqld-exporter", - "tls_secret_name": "mysqld-exporter", - "ingress_whitelist_source_range": "0.0.0.0/0", - } - ) + def test_with_config( + self, + ) -> NoReturn: + "Test with config" + self.initialize_mysql_relation() - pod_spec, _ = self.harness.get_pod_spec() + # Verifying status + 
self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus) - self.assertDictEqual(expected_result, pod_spec) + def test_mysql_exception_relation_and_config( + self, + ) -> NoReturn: + self.initialize_mysql_config() + self.initialize_mysql_relation() - def test_on_mysql_unit_relation_changed(self) -> NoReturn: - """Test to see if mysql relation is updated.""" - self.harness.charm.on.start.emit() + # Verifying status + self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) - relation_id = self.harness.add_relation("mysql", "mysql") - self.harness.add_relation_unit(relation_id, "mysql/0") + def initialize_mysql_relation(self): + mongodb_relation_id = self.harness.add_relation("mysql", "mysql") + self.harness.add_relation_unit(mongodb_relation_id, "mysql/0") self.harness.update_relation_data( - relation_id, + mongodb_relation_id, "mysql/0", { - "host": "mysql", - "port": "3306", - "user": "mano", - "password": "manopw", - "root_password": "rootpw", + "user": "user", + "password": "pass", + "host": "host", + "port": "1234", + "database": "pol", + "root_password": "root_password", }, ) - # Verifying status - self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus) - - def test_publish_target_info(self) -> NoReturn: - """Test to see if target relation is updated.""" - expected_result = { - "hostname": "mysqld-exporter", - "port": "9104", - "metrics_path": "/metrics", - "scrape_interval": "30s", - "scrape_timeout": "15s", - } - - self.harness.charm.on.start.emit() - - relation_id = self.harness.add_relation("prometheus-scrape", "prometheus") - self.harness.add_relation_unit(relation_id, "prometheus/0") - relation_data = self.harness.get_relation_data(relation_id, "mysqld-exporter/0") - - self.assertDictEqual(expected_result, relation_data) - - def test_publish_scrape_info_with_site_url(self) -> NoReturn: - """Test to see if target relation is updated.""" - expected_result = { - "hostname": "mysqld-exporter-osm", - "port": "80", - "metrics_path": "/metrics", - "scrape_interval": "30s", - "scrape_timeout": "15s", - } - - self.harness.charm.on.start.emit() - - self.harness.update_config({"site_url": "http://mysqld-exporter-osm"}) - - relation_id = self.harness.add_relation("prometheus-scrape", "prometheus") - self.harness.add_relation_unit(relation_id, "prometheus/0") - relation_data = self.harness.get_relation_data(relation_id, "mysqld-exporter/0") - - self.assertDictEqual(expected_result, relation_data) - - def test_publish_dashboard_info(self) -> NoReturn: - """Test to see if dashboard relation is updated.""" - self.harness.charm.on.start.emit() - - relation_id = self.harness.add_relation("grafana-dashboard", "grafana") - self.harness.add_relation_unit(relation_id, "grafana/0") - relation_data = self.harness.get_relation_data(relation_id, "mysqld-exporter/0") - - self.assertTrue("dashboard" in relation_data) - self.assertTrue(len(relation_data["dashboard"]) > 0) - self.assertEqual(relation_data["name"], "osm-mysql") + def initialize_mysql_config(self): + self.harness.update_config({"mysql_uri": "mysql://user:pass@mysql-host:3306"}) if __name__ == "__main__": unittest.main() + + +# class TestCharm(unittest.TestCase): +# """Mysql Exporter Charm unit tests.""" +# +# def setUp(self) -> NoReturn: +# """Test setup""" +# self.harness = Harness(MysqldExporterCharm) +# self.harness.set_leader(is_leader=True) +# self.harness.begin() +# +# def test_on_start_without_relations(self) -> NoReturn: +# """Test installation without any relation.""" +# 
self.harness.charm.on.start.emit() +# +# # Verifying status +# self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus) +# +# # Verifying status message +# self.assertGreater(len(self.harness.charm.unit.status.message), 0) +# self.assertTrue( +# self.harness.charm.unit.status.message.startswith("Waiting for ") +# ) +# self.assertIn("mysql", self.harness.charm.unit.status.message) +# self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation")) +# +# def test_on_start_with_relations_without_http(self) -> NoReturn: +# """Test deployment.""" +# expected_result = { +# "version": 3, +# "containers": [ +# { +# "name": "mysqld-exporter", +# "imageDetails": self.harness.charm.image.fetch(), +# "imagePullPolicy": "Always", +# "ports": [ +# { +# "name": "mysqld-exporter", +# "containerPort": 9104, +# "protocol": "TCP", +# } +# ], +# "envConfig": {"DATA_SOURCE_NAME": "root:rootpw@(mysql:3306)/"}, +# "kubernetes": { +# "readinessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9104, +# }, +# "initialDelaySeconds": 10, +# "periodSeconds": 10, +# "timeoutSeconds": 5, +# "successThreshold": 1, +# "failureThreshold": 3, +# }, +# "livenessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9104, +# }, +# "initialDelaySeconds": 60, +# "timeoutSeconds": 30, +# "failureThreshold": 10, +# }, +# }, +# }, +# ], +# "kubernetesResources": {"ingressResources": []}, +# } +# +# self.harness.charm.on.start.emit() +# +# # Initializing the mysql relation +# relation_id = self.harness.add_relation("mysql", "mysql") +# self.harness.add_relation_unit(relation_id, "mysql/0") +# self.harness.update_relation_data( +# relation_id, +# "mysql/0", +# { +# "host": "mysql", +# "port": "3306", +# "user": "mano", +# "password": "manopw", +# "root_password": "rootpw", +# }, +# ) +# +# # Verifying status +# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus) +# +# pod_spec, _ = self.harness.get_pod_spec() +# +# self.assertDictEqual(expected_result, pod_spec) +# +# def test_ingress_resources_with_http(self) -> NoReturn: +# """Test ingress resources with HTTP.""" +# expected_result = { +# "version": 3, +# "containers": [ +# { +# "name": "mysqld-exporter", +# "imageDetails": self.harness.charm.image.fetch(), +# "imagePullPolicy": "Always", +# "ports": [ +# { +# "name": "mysqld-exporter", +# "containerPort": 9104, +# "protocol": "TCP", +# } +# ], +# "envConfig": {"DATA_SOURCE_NAME": "root:rootpw@(mysql:3306)/"}, +# "kubernetes": { +# "readinessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9104, +# }, +# "initialDelaySeconds": 10, +# "periodSeconds": 10, +# "timeoutSeconds": 5, +# "successThreshold": 1, +# "failureThreshold": 3, +# }, +# "livenessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9104, +# }, +# "initialDelaySeconds": 60, +# "timeoutSeconds": 30, +# "failureThreshold": 10, +# }, +# }, +# }, +# ], +# "kubernetesResources": { +# "ingressResources": [ +# { +# "name": "mysqld-exporter-ingress", +# "annotations": { +# "nginx.ingress.kubernetes.io/ssl-redirect": "false", +# }, +# "spec": { +# "rules": [ +# { +# "host": "mysqld-exporter", +# "http": { +# "paths": [ +# { +# "path": "/", +# "backend": { +# "serviceName": "mysqld-exporter", +# "servicePort": 9104, +# }, +# } +# ] +# }, +# } +# ] +# }, +# } +# ], +# }, +# } +# +# self.harness.charm.on.start.emit() +# +# # Initializing the mysql relation +# relation_id = self.harness.add_relation("mysql", "mysql") +# self.harness.add_relation_unit(relation_id, "mysql/0") +# 
self.harness.update_relation_data( +# relation_id, +# "mysql/0", +# { +# "host": "mysql", +# "port": "3306", +# "user": "mano", +# "password": "manopw", +# "root_password": "rootpw", +# }, +# ) +# +# self.harness.update_config({"site_url": "http://mysqld-exporter"}) +# +# pod_spec, _ = self.harness.get_pod_spec() +# +# self.assertDictEqual(expected_result, pod_spec) +# +# def test_ingress_resources_with_https(self) -> NoReturn: +# """Test ingress resources with HTTPS.""" +# expected_result = { +# "version": 3, +# "containers": [ +# { +# "name": "mysqld-exporter", +# "imageDetails": self.harness.charm.image.fetch(), +# "imagePullPolicy": "Always", +# "ports": [ +# { +# "name": "mysqld-exporter", +# "containerPort": 9104, +# "protocol": "TCP", +# } +# ], +# "envConfig": {"DATA_SOURCE_NAME": "root:rootpw@(mysql:3306)/"}, +# "kubernetes": { +# "readinessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9104, +# }, +# "initialDelaySeconds": 10, +# "periodSeconds": 10, +# "timeoutSeconds": 5, +# "successThreshold": 1, +# "failureThreshold": 3, +# }, +# "livenessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9104, +# }, +# "initialDelaySeconds": 60, +# "timeoutSeconds": 30, +# "failureThreshold": 10, +# }, +# }, +# }, +# ], +# "kubernetesResources": { +# "ingressResources": [ +# { +# "name": "mysqld-exporter-ingress", +# "annotations": {}, +# "spec": { +# "rules": [ +# { +# "host": "mysqld-exporter", +# "http": { +# "paths": [ +# { +# "path": "/", +# "backend": { +# "serviceName": "mysqld-exporter", +# "servicePort": 9104, +# }, +# } +# ] +# }, +# } +# ], +# "tls": [ +# { +# "hosts": ["mysqld-exporter"], +# "secretName": "mysqld-exporter", +# } +# ], +# }, +# } +# ], +# }, +# } +# +# self.harness.charm.on.start.emit() +# +# # Initializing the mysql relation +# relation_id = self.harness.add_relation("mysql", "mysql") +# self.harness.add_relation_unit(relation_id, "mysql/0") +# self.harness.update_relation_data( +# relation_id, +# "mysql/0", +# { +# "host": "mysql", +# "port": "3306", +# "user": "mano", +# "password": "manopw", +# "root_password": "rootpw", +# }, +# ) +# +# self.harness.update_config( +# { +# "site_url": "https://mysqld-exporter", +# "tls_secret_name": "mysqld-exporter", +# } +# ) +# +# pod_spec, _ = self.harness.get_pod_spec() +# +# self.assertDictEqual(expected_result, pod_spec) +# +# def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn: +# """Test ingress resources with HTTPS and ingress whitelist.""" +# expected_result = { +# "version": 3, +# "containers": [ +# { +# "name": "mysqld-exporter", +# "imageDetails": self.harness.charm.image.fetch(), +# "imagePullPolicy": "Always", +# "ports": [ +# { +# "name": "mysqld-exporter", +# "containerPort": 9104, +# "protocol": "TCP", +# } +# ], +# "envConfig": {"DATA_SOURCE_NAME": "root:rootpw@(mysql:3306)/"}, +# "kubernetes": { +# "readinessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9104, +# }, +# "initialDelaySeconds": 10, +# "periodSeconds": 10, +# "timeoutSeconds": 5, +# "successThreshold": 1, +# "failureThreshold": 3, +# }, +# "livenessProbe": { +# "httpGet": { +# "path": "/api/health", +# "port": 9104, +# }, +# "initialDelaySeconds": 60, +# "timeoutSeconds": 30, +# "failureThreshold": 10, +# }, +# }, +# }, +# ], +# "kubernetesResources": { +# "ingressResources": [ +# { +# "name": "mysqld-exporter-ingress", +# "annotations": { +# "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0", +# }, +# "spec": { +# "rules": [ +# { +# "host": "mysqld-exporter", 
+# "http": { +# "paths": [ +# { +# "path": "/", +# "backend": { +# "serviceName": "mysqld-exporter", +# "servicePort": 9104, +# }, +# } +# ] +# }, +# } +# ], +# "tls": [ +# { +# "hosts": ["mysqld-exporter"], +# "secretName": "mysqld-exporter", +# } +# ], +# }, +# } +# ], +# }, +# } +# +# self.harness.charm.on.start.emit() +# +# # Initializing the mysql relation +# relation_id = self.harness.add_relation("mysql", "mysql") +# self.harness.add_relation_unit(relation_id, "mysql/0") +# self.harness.update_relation_data( +# relation_id, +# "mysql/0", +# { +# "host": "mysql", +# "port": "3306", +# "user": "mano", +# "password": "manopw", +# "root_password": "rootpw", +# }, +# ) +# +# self.harness.update_config( +# { +# "site_url": "https://mysqld-exporter", +# "tls_secret_name": "mysqld-exporter", +# "ingress_whitelist_source_range": "0.0.0.0/0", +# } +# ) +# +# pod_spec, _ = self.harness.get_pod_spec() +# +# self.assertDictEqual(expected_result, pod_spec) +# +# def test_on_mysql_unit_relation_changed(self) -> NoReturn: +# """Test to see if mysql relation is updated.""" +# self.harness.charm.on.start.emit() +# +# relation_id = self.harness.add_relation("mysql", "mysql") +# self.harness.add_relation_unit(relation_id, "mysql/0") +# self.harness.update_relation_data( +# relation_id, +# "mysql/0", +# { +# "host": "mysql", +# "port": "3306", +# "user": "mano", +# "password": "manopw", +# "root_password": "rootpw", +# }, +# ) +# +# # Verifying status +# self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus) +# +# def test_publish_target_info(self) -> NoReturn: +# """Test to see if target relation is updated.""" +# expected_result = { +# "hostname": "mysqld-exporter", +# "port": "9104", +# "metrics_path": "/metrics", +# "scrape_interval": "30s", +# "scrape_timeout": "15s", +# } +# +# self.harness.charm.on.start.emit() +# +# relation_id = self.harness.add_relation("prometheus-scrape", "prometheus") +# self.harness.add_relation_unit(relation_id, "prometheus/0") +# relation_data = self.harness.get_relation_data(relation_id, "mysqld-exporter/0") +# +# self.assertDictEqual(expected_result, relation_data) +# +# def test_publish_scrape_info_with_site_url(self) -> NoReturn: +# """Test to see if target relation is updated.""" +# expected_result = { +# "hostname": "mysqld-exporter-osm", +# "port": "80", +# "metrics_path": "/metrics", +# "scrape_interval": "30s", +# "scrape_timeout": "15s", +# } +# +# self.harness.charm.on.start.emit() +# +# self.harness.update_config({"site_url": "http://mysqld-exporter-osm"}) +# +# relation_id = self.harness.add_relation("prometheus-scrape", "prometheus") +# self.harness.add_relation_unit(relation_id, "prometheus/0") +# relation_data = self.harness.get_relation_data(relation_id, "mysqld-exporter/0") +# +# self.assertDictEqual(expected_result, relation_data) +# +# def test_publish_dashboard_info(self) -> NoReturn: +# """Test to see if dashboard relation is updated.""" +# self.harness.charm.on.start.emit() +# +# relation_id = self.harness.add_relation("grafana-dashboard", "grafana") +# self.harness.add_relation_unit(relation_id, "grafana/0") +# relation_data = self.harness.get_relation_data(relation_id, "mysqld-exporter/0") +# +# self.assertTrue("dashboard" in relation_data) +# self.assertTrue(len(relation_data["dashboard"]) > 0) +# self.assertEqual(relation_data["name"], "osm-mysql") +# +# +# if __name__ == "__main__": +# unittest.main() diff --git a/installers/charm/mysqld-exporter/tox.ini b/installers/charm/mysqld-exporter/tox.ini index bfbc04a5..f207ac34 
100644 --- a/installers/charm/mysqld-exporter/tox.ini +++ b/installers/charm/mysqld-exporter/tox.ini @@ -18,64 +18,107 @@ # To get in touch with the maintainers, please contact: # osm-charmers@lists.launchpad.net ## +####################################################################################### [tox] -skipsdist = True -envlist = unit, lint -sitepackages = False -skip_missing_interpreters = False +envlist = black, cover, flake8, pylint, yamllint, safety +skipsdist = true + +[tox:jenkins] +toxworkdir = /tmp/.tox [testenv] -basepython = python3 +basepython = python3.8 +setenv = VIRTUAL_ENV={envdir} + PYTHONDONTWRITEBYTECODE = 1 +deps = -r{toxinidir}/requirements.txt + + +####################################################################################### +[testenv:black] +deps = black +commands = + black --check --diff src/ tests/ + + +####################################################################################### +[testenv:cover] +deps = {[testenv]deps} + -r{toxinidir}/requirements-test.txt + coverage + nose2 +commands = + sh -c 'rm -f nosetests.xml' + coverage erase + nose2 -C --coverage src + coverage report --omit='*tests*' + coverage html -d ./cover --omit='*tests*' + coverage xml -o coverage.xml --omit=*tests* +whitelist_externals = sh + + +####################################################################################### +[testenv:flake8] +deps = flake8 + flake8-import-order +commands = + flake8 src/ tests/ + + +####################################################################################### +[testenv:pylint] +deps = {[testenv]deps} + -r{toxinidir}/requirements-test.txt + pylint +commands = + pylint -E src/ tests/ + + +####################################################################################### +[testenv:safety] setenv = - PYTHONHASHSEED=0 - PYTHONPATH = {toxinidir}/src - CHARM_NAME = mysqld-exporter + LC_ALL=C.UTF-8 + LANG=C.UTF-8 +deps = {[testenv]deps} + safety +commands = + - safety check --full-report + +####################################################################################### +[testenv:yamllint] +deps = {[testenv]deps} + -r{toxinidir}/requirements-test.txt + yamllint +commands = yamllint . + +####################################################################################### [testenv:build] passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY +deps = charmcraft whitelist_externals = charmcraft - rm - unzip + cp commands = - rm -rf release mysqld-exporter.charm charmcraft build - unzip mysqld-exporter.charm -d release + cp -r build release -[testenv:unit] -commands = - coverage erase - stestr run --slowest --test-path=./tests --top-dir=./ - coverage combine - coverage html -d cover - coverage xml -o cover/coverage.xml - coverage report -deps = - coverage - stestr - mock - ops -setenv = - {[testenv]setenv} - PYTHON=coverage run - -[testenv:lint] -deps = - black - yamllint - flake8 -commands = - black --check --diff . --exclude "build/|.tox/|mod/|lib/" - yamllint . - flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/ .tox/ mod/ lib/" - -[coverage:run] -branch = True -concurrency = multiprocessing -parallel = True -source = - . 
-omit = - .tox/* - tests/* +####################################################################################### +[flake8] +ignore = + W291, + W293, + W503, + E123, + E125, + E226, + E241, +exclude = + .git, + __pycache__, + .tox, +max-line-length = 120 +show-source = True +builtins = _ +max-complexity = 10 +import-order-style = google diff --git a/installers/charm/nbi/requirements-test.txt b/installers/charm/nbi/requirements-test.txt index cf61dd4e..316f6d20 100644 --- a/installers/charm/nbi/requirements-test.txt +++ b/installers/charm/nbi/requirements-test.txt @@ -17,4 +17,5 @@ # # To get in touch with the maintainers, please contact: # osm-charmers@lists.launchpad.net + mock==4.0.3 diff --git a/installers/charm/nbi/requirements.txt b/installers/charm/nbi/requirements.txt index 1a8928c7..8bb93ad3 100644 --- a/installers/charm/nbi/requirements.txt +++ b/installers/charm/nbi/requirements.txt @@ -19,4 +19,4 @@ # osm-charmers@lists.launchpad.net ## -git+https://github.com/charmed-osm/ops-lib-charmed-osm/@master \ No newline at end of file +git+https://github.com/charmed-osm/ops-lib-charmed-osm/@master