From: sousaedu Date: Wed, 4 Nov 2020 17:43:47 +0000 (+0000) Subject: Refactoring MON Charm to use Operator Framework X-Git-Tag: release-v9.0-start~36 X-Git-Url: https://osm.etsi.org/gitweb/?p=osm%2Fdevops.git;a=commitdiff_plain;h=1dd4c0d0edbeb0579540f094930743d47dce5174 Refactoring MON Charm to use Operator Framework This refactoring work includes tests. Note 1: old charm is in mon-k8s folder. Note 2: relation-departed is currently not tested because there is no function to remove a relation in the Testing Harness. There is currently one issue open and the Charmcraft team should provide feedback soon. Change-Id: I57337260579975a1d07157d616acac5bcbef0577 Signed-off-by: sousaedu --- diff --git a/installers/charm/mon/.gitignore b/installers/charm/mon/.gitignore new file mode 100644 index 00000000..0be86d64 --- /dev/null +++ b/installers/charm/mon/.gitignore @@ -0,0 +1,28 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: legal@canonical.com +# +# To get in touch with the maintainers, please contact: +# osm-charmers@lists.launchpad.net +## + +venv +.vscode +build +mon.charm +.coverage +.stestr +cover diff --git a/installers/charm/mon/.yamllint.yaml b/installers/charm/mon/.yamllint.yaml new file mode 100644 index 00000000..c20ac8d6 --- /dev/null +++ b/installers/charm/mon/.yamllint.yaml @@ -0,0 +1,33 @@ +# Copyright 2020 Canonical Ltd. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: legal@canonical.com +# +# To get in touch with the maintainers, please contact: +# osm-charmers@lists.launchpad.net +## + +--- +extends: default + +yaml-files: + - "*.yaml" + - "*.yml" + - ".yamllint" +ignore: | + .tox + build/ + mod/ + lib/ diff --git a/installers/charm/mon/README.md b/installers/charm/mon/README.md new file mode 100644 index 00000000..216a7846 --- /dev/null +++ b/installers/charm/mon/README.md @@ -0,0 +1,23 @@ + + +# MON operator Charm for Kubernetes + +## Requirements diff --git a/installers/charm/mon/config.yaml b/installers/charm/mon/config.yaml new file mode 100644 index 00000000..9480ea78 --- /dev/null +++ b/installers/charm/mon/config.yaml @@ -0,0 +1,62 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: legal@canonical.com +# +# To get in touch with the maintainers, please contact: +# osm-charmers@lists.launchpad.net +## + +options: + openstack_default_granularity: + description: Openstack default granularity + type: int + default: 300 + global_request_timeout: + description: Global request timeout + type: int + default: 10 + log_level: + description: Log level + type: string + default: INFO + database_commonkey: + description: Database common key + type: string + default: osm + collector_interval: + description: Collector interval + type: int + default: 30 + evaluator_interval: + description: Evaluator interval + type: int + default: 30 + vca_host: + type: string + description: "The VCA host." + default: "admin" + vca_user: + type: string + description: "The VCA user name." + default: "admin" + vca_password: + type: string + description: "The VCA user password." + default: "secret" + vca_cacert: + type: string + description: "The VCA cacert." + default: "" diff --git a/installers/charm/mon/metadata.yaml b/installers/charm/mon/metadata.yaml new file mode 100644 index 00000000..2578b1eb --- /dev/null +++ b/installers/charm/mon/metadata.yaml @@ -0,0 +1,47 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: legal@canonical.com +# +# To get in touch with the maintainers, please contact: +# osm-charmers@lists.launchpad.net +## + +name: mon +summary: OSM Monitoring (MON) +description: | + A CAAS charm to deploy OSM's Monitoring (MON). +series: + - kubernetes +tags: + - kubernetes + - osm + - mon +min-juju-version: 2.8.0 +deployment: + type: stateless + service: cluster +resources: + image: + type: oci-image + description: OSM docker image for MON + upstream-source: "opensourcemano/mon:8" +requires: + kafka: + interface: kafka + mongodb: + interface: mongodb + prometheus: + interface: prometheus diff --git a/installers/charm/mon/requirements.txt b/installers/charm/mon/requirements.txt new file mode 100644 index 00000000..24f1672c --- /dev/null +++ b/installers/charm/mon/requirements.txt @@ -0,0 +1,24 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: legal@canonical.com +# +# To get in touch with the maintainers, please contact: +# osm-charmers@lists.launchpad.net +## + +ops +pydantic +git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image diff --git a/installers/charm/mon/src/charm.py b/installers/charm/mon/src/charm.py new file mode 100755 index 00000000..a6f405f9 --- /dev/null +++ b/installers/charm/mon/src/charm.py @@ -0,0 +1,281 @@ +#!/usr/bin/env python3 +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
# For those usages not covered by the Apache License, Version 2.0 please
# contact: legal@canonical.com
#
# To get in touch with the maintainers, please contact:
# osm-charmers@lists.launchpad.net
##

import logging
from typing import Any, Dict

from pydantic import ValidationError

from ops.charm import CharmBase, CharmEvents
from ops.framework import EventBase, EventSource, StoredState
from ops.main import main
from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus
from oci_image import OCIImageResource, OCIImageResourceError

from pod_spec import make_pod_spec

LOGGER = logging.getLogger(__name__)

# Port exposed by the MON container.
MON_PORT = 8000


class ConfigurePodEvent(EventBase):
    """Custom internal event used to (re)trigger pod spec assembly."""

    pass


class MonEvents(CharmEvents):
    """MON custom events."""

    configure_pod = EventSource(ConfigurePodEvent)


class MonCharm(CharmBase):
    """MON Charm.

    Collects configuration and relation data (kafka, mongodb, prometheus)
    and assembles the Kubernetes pod spec for the MON service.
    """

    state = StoredState()
    on = MonEvents()

    def __init__(self, *args) -> None:
        """MON Charm constructor."""
        super().__init__(*args)

        # Internal state initialization: last applied pod spec plus the
        # data received from each required relation.
        self.state.set_default(pod_spec=None)

        # Message bus (kafka) data initialization
        self.state.set_default(message_host=None)
        self.state.set_default(message_port=None)

        # Database (mongodb) data initialization
        self.state.set_default(database_uri=None)

        # Prometheus data initialization
        self.state.set_default(prometheus_host=None)
        self.state.set_default(prometheus_port=None)

        self.port = MON_PORT
        self.image = OCIImageResource(self, "image")

        # Regular lifecycle events all funnel into configure_pod
        self.framework.observe(self.on.start, self.configure_pod)
        self.framework.observe(self.on.config_changed, self.configure_pod)
        self.framework.observe(self.on.upgrade_charm, self.configure_pod)

        # Custom internal event
        self.framework.observe(self.on.configure_pod, self.configure_pod)

        # Required relation changed events
        self.framework.observe(
            self.on.kafka_relation_changed, self._on_kafka_relation_changed
        )
        self.framework.observe(
            self.on.mongodb_relation_changed, self._on_mongodb_relation_changed
        )
        self.framework.observe(
            self.on.prometheus_relation_changed, self._on_prometheus_relation_changed
        )

        # Required relation departed events
        self.framework.observe(
            self.on.kafka_relation_departed, self._on_kafka_relation_departed
        )
        self.framework.observe(
            self.on.mongodb_relation_departed, self._on_mongodb_relation_departed
        )
        self.framework.observe(
            self.on.prometheus_relation_departed, self._on_prometheus_relation_departed
        )

    def _on_kafka_relation_changed(self, event: EventBase) -> None:
        """Reads information about the kafka relation.

        Args:
            event (EventBase): Kafka relation event.
        """
        # Prefer unit data; fall back to app data when no unit is involved.
        data_loc = event.unit if event.unit else event.app

        message_host = event.relation.data[data_loc].get("host")
        message_port = event.relation.data[data_loc].get("port")

        # Only re-trigger pod configuration when both values are present
        # and at least one of them actually changed.
        if (
            message_host
            and message_port
            and (
                self.state.message_host != message_host
                or self.state.message_port != message_port
            )
        ):
            self.state.message_host = message_host
            self.state.message_port = message_port
            self.on.configure_pod.emit()

    def _on_kafka_relation_departed(self, event: EventBase) -> None:
        """Clear kafka relation data.

        Args:
            event (EventBase): Kafka relation event.
        """
        self.state.message_host = None
        self.state.message_port = None
        self.on.configure_pod.emit()

    def _on_mongodb_relation_changed(self, event: EventBase) -> None:
        """Reads information about the DB relation.

        Args:
            event (EventBase): DB relation event.
        """
        data_loc = event.unit if event.unit else event.app

        database_uri = event.relation.data[data_loc].get("connection_string")

        if database_uri and self.state.database_uri != database_uri:
            self.state.database_uri = database_uri
            self.on.configure_pod.emit()

    def _on_mongodb_relation_departed(self, event: EventBase) -> None:
        """Clear mongodb relation data.

        Args:
            event (EventBase): DB relation event.
        """
        self.state.database_uri = None
        self.on.configure_pod.emit()

    def _on_prometheus_relation_changed(self, event: EventBase) -> None:
        """Reads information about the prometheus relation.

        Args:
            event (EventBase): Prometheus relation event.
        """
        data_loc = event.unit if event.unit else event.app

        prometheus_host = event.relation.data[data_loc].get("hostname")
        prometheus_port = event.relation.data[data_loc].get("port")

        if (
            prometheus_host
            and prometheus_port
            and (
                self.state.prometheus_host != prometheus_host
                or self.state.prometheus_port != prometheus_port
            )
        ):
            self.state.prometheus_host = prometheus_host
            self.state.prometheus_port = prometheus_port
            self.on.configure_pod.emit()

    def _on_prometheus_relation_departed(self, event: EventBase) -> None:
        """Clear prometheus relation data.

        Args:
            event (EventBase): Prometheus relation event.
        """
        self.state.prometheus_host = None
        self.state.prometheus_port = None
        self.on.configure_pod.emit()

    def _missing_relations(self) -> str:
        """Checks if there are missing relations.

        Returns:
            str: comma-separated names of the relations that have not yet
                 provided data (empty string when none are missing).
        """
        data_status = {
            "kafka": self.state.message_host,
            "mongodb": self.state.database_uri,
            "prometheus": self.state.prometheus_host,
        }

        missing_relations = [k for k, v in data_status.items() if not v]

        return ", ".join(missing_relations)

    @property
    def relation_state(self) -> Dict[str, Any]:
        """Collects relation state configuration for pod spec assembly.

        Returns:
            Dict[str, Any]: relation state information.
        """
        relation_state = {
            "message_host": self.state.message_host,
            "message_port": self.state.message_port,
            "database_uri": self.state.database_uri,
            "prometheus_host": self.state.prometheus_host,
            "prometheus_port": self.state.prometheus_port,
        }

        return relation_state

    def configure_pod(self, event: EventBase) -> None:
        """Assemble the pod spec and apply it, if possible.

        Args:
            event (EventBase): Hook or Relation event that started the
                function.
        """
        if missing := self._missing_relations():
            self.unit.status = BlockedStatus(
                "Waiting for {0} relation{1}".format(
                    missing, "s" if "," in missing else ""
                )
            )
            return

        # Only the leader applies the pod spec; followers just report ready.
        if not self.unit.is_leader():
            self.unit.status = ActiveStatus("ready")
            return

        self.unit.status = MaintenanceStatus("Assembling pod spec")

        # Fetch image information
        try:
            self.unit.status = MaintenanceStatus("Fetching image information")
            image_info = self.image.fetch()
        except OCIImageResourceError:
            self.unit.status = BlockedStatus("Error fetching image information")
            return

        # Validate config/relation data and build the spec; validation
        # failures surface as a BlockedStatus with the pydantic message.
        try:
            pod_spec = make_pod_spec(
                image_info,
                self.model.config,
                self.relation_state,
                self.model.app.name,
                self.port,
            )
        except ValidationError as exc:
            LOGGER.exception("Config/Relation data validation error")
            self.unit.status = BlockedStatus(str(exc))
            return

        # Avoid redundant set_spec calls when nothing changed.
        if self.state.pod_spec != pod_spec:
            self.model.pod.set_spec(pod_spec)
            self.state.pod_spec = pod_spec

        self.unit.status = ActiveStatus("ready")


if __name__ == "__main__":
    main(MonCharm)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact: legal@canonical.com
#
# To get in touch with the maintainers, please contact:
# osm-charmers@lists.launchpad.net
##

import logging
from typing import Any, Dict, List

from pydantic import BaseModel, constr, PositiveInt

logger = logging.getLogger(__name__)


class ConfigData(BaseModel):
    """Configuration data model.

    Validates the charm config before it is injected into the pod spec.
    """

    openstack_default_granularity: PositiveInt
    global_request_timeout: PositiveInt
    log_level: constr(regex=r"^(INFO|DEBUG)$")
    collector_interval: PositiveInt
    evaluator_interval: PositiveInt
    database_commonkey: constr(min_length=1)
    vca_host: constr(min_length=1)
    vca_user: constr(min_length=1)
    vca_password: constr(min_length=1)
    vca_cacert: str


class RelationData(BaseModel):
    """Relation data model.

    Validates the data gathered from the kafka, mongodb and prometheus
    relations before it is injected into the pod spec.
    """

    message_host: constr(min_length=1)
    message_port: PositiveInt
    database_uri: constr(regex=r"^(mongodb://)")
    prometheus_host: constr(min_length=1)
    prometheus_port: PositiveInt


def _make_pod_ports(port: int) -> List[Dict[str, Any]]:
    """Generate pod ports details.

    Args:
        port (int): port to expose.

    Returns:
        List[Dict[str, Any]]: pod port details.
    """
    return [{"name": "mon", "containerPort": port, "protocol": "TCP"}]


def _make_pod_envconfig(
    config: Dict[str, Any], relation_state: Dict[str, Any]
) -> Dict[str, Any]:
    """Generate pod environment configuration.

    Args:
        config (Dict[str, Any]): configuration information.
        relation_state (Dict[str, Any]): relation state information.

    Returns:
        Dict[str, Any]: pod environment configuration.
    """
    envconfig = {
        # General configuration
        "ALLOW_ANONYMOUS_LOGIN": "yes",
        "OSMMON_OPENSTACK_DEFAULT_GRANULARITY": config["openstack_default_granularity"],
        "OSMMON_GLOBAL_REQUEST_TIMEOUT": config["global_request_timeout"],
        "OSMMON_GLOBAL_LOGLEVEL": config["log_level"],
        "OSMMON_COLLECTOR_INTERVAL": config["collector_interval"],
        "OSMMON_EVALUATOR_INTERVAL": config["evaluator_interval"],
        # Kafka configuration
        "OSMMON_MESSAGE_DRIVER": "kafka",
        "OSMMON_MESSAGE_HOST": relation_state["message_host"],
        "OSMMON_MESSAGE_PORT": relation_state["message_port"],
        # Database configuration
        "OSMMON_DATABASE_DRIVER": "mongo",
        "OSMMON_DATABASE_URI": relation_state["database_uri"],
        "OSMMON_DATABASE_COMMONKEY": config["database_commonkey"],
        # Prometheus configuration
        "OSMMON_PROMETHEUS_URL": f"http://{relation_state['prometheus_host']}:{relation_state['prometheus_port']}",
        # VCA configuration
        "OSMMON_VCA_HOST": config["vca_host"],
        "OSMMON_VCA_USER": config["vca_user"],
        "OSMMON_VCA_SECRET": config["vca_password"],
        "OSMMON_VCA_CACERT": config["vca_cacert"],
    }

    return envconfig


def _make_startup_probe() -> Dict[str, Any]:
    """Generate startup probe.

    NOTE(review): not currently referenced by make_pod_spec — the probe is
    built but never wired into the container spec. Confirm whether probes
    should be attached.

    Returns:
        Dict[str, Any]: startup probe.
    """
    return {
        # The exec command is an argv array: binary and argument must be
        # separate elements (a single "/usr/bin/pgrep python3" string would
        # be treated as one executable path and always fail).
        "exec": {"command": ["/usr/bin/pgrep", "python3"]},
        "initialDelaySeconds": 60,
        "timeoutSeconds": 5,
    }


def _make_readiness_probe(port: int) -> Dict[str, Any]:
    """Generate readiness probe.

    NOTE(review): not currently referenced by make_pod_spec.

    Args:
        port (int): TCP port the probe connects to.

    Returns:
        Dict[str, Any]: readiness probe.
    """
    return {
        "tcpSocket": {
            "port": port,
        },
        "periodSeconds": 10,
        "timeoutSeconds": 5,
        "successThreshold": 1,
        "failureThreshold": 3,
    }


def _make_liveness_probe(port: int) -> Dict[str, Any]:
    """Generate liveness probe.

    NOTE(review): not currently referenced by make_pod_spec.

    Args:
        port (int): TCP port the probe connects to.

    Returns:
        Dict[str, Any]: liveness probe.
    """
    return {
        "tcpSocket": {
            "port": port,
        },
        "initialDelaySeconds": 45,
        "periodSeconds": 10,
        "timeoutSeconds": 5,
        "successThreshold": 1,
        "failureThreshold": 3,
    }


def make_pod_spec(
    image_info: Dict[str, str],
    config: Dict[str, Any],
    relation_state: Dict[str, Any],
    app_name: str = "mon",
    port: int = 8000,
) -> Dict[str, Any]:
    """Generate the pod spec information.

    Args:
        image_info (Dict[str, str]): Object provided by
            OCIImageResource("image").fetch().
        config (Dict[str, Any]): Configuration information.
        relation_state (Dict[str, Any]): Relation state information.
        app_name (str, optional): Application name. Defaults to "mon".
        port (int, optional): Port for the container. Defaults to 8000.

    Returns:
        Dict[str, Any]: Pod spec dictionary for the charm, or None when no
        image information is available.

    Raises:
        pydantic.ValidationError: if config or relation data is invalid.
    """
    if not image_info:
        return None

    # Validate inputs; raises ValidationError on bad data.
    ConfigData(**(config))
    RelationData(**(relation_state))

    ports = _make_pod_ports(port)
    env_config = _make_pod_envconfig(config, relation_state)

    return {
        "version": 3,
        "containers": [
            {
                "name": app_name,
                "imageDetails": image_info,
                "imagePullPolicy": "Always",
                "ports": ports,
                "envConfig": env_config,
            }
        ],
        "kubernetesResources": {
            "ingressResources": [],
        },
    }
# You may obtain a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact: legal@canonical.com
#
# To get in touch with the maintainers, please contact:
# osm-charmers@lists.launchpad.net
##

"""Init mocking for unit tests."""

import sys
# Use the standard-library mock instead of the third-party ``mock``
# package: same MagicMock API, one fewer test dependency.
from unittest import mock

# Make the charm sources importable as top-level modules in tests.
sys.path.append("src")

# Replace the oci-image resource helper with a mock so the charm can be
# imported without the real dependency installed.
oci_image = mock.MagicMock()
sys.modules["oci_image"] = oci_image
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact: legal@canonical.com
#
# To get in touch with the maintainers, please contact:
# osm-charmers@lists.launchpad.net
##

import unittest

from ops.model import BlockedStatus
from ops.testing import Harness

from charm import MonCharm


class TestCharm(unittest.TestCase):
    """MON Charm unit tests."""

    def setUp(self) -> None:
        """Test setup: build a harnessed, leader MON charm."""
        self.harness = Harness(MonCharm)
        self.harness.set_leader(is_leader=True)
        self.harness.begin()

    def _assert_blocked_waiting_for(self, missing, present=()) -> None:
        """Assert the unit is Blocked waiting for exactly the given relations.

        Args:
            missing: relation names expected in the status message.
            present: relation names expected NOT to be in the message.
        """
        status = self.harness.charm.unit.status
        self.assertIsInstance(status, BlockedStatus)
        self.assertGreater(len(status.message), 0)
        self.assertTrue(status.message.startswith("Waiting for "))
        self.assertTrue(status.message.endswith(" relations"))
        for relation in missing:
            self.assertIn(relation, status.message)
        for relation in present:
            self.assertNotIn(relation, status.message)

    def test_on_start_without_relations(self) -> None:
        """Test installation without any relation."""
        self.harness.charm.on.start.emit()

        self._assert_blocked_waiting_for(
            missing=("kafka", "mongodb", "prometheus")
        )

    def test_on_start_with_relations(self) -> None:
        """Test deployment with all required relations in place."""
        expected_result = {
            "version": 3,
            "containers": [
                {
                    "name": "mon",
                    "imageDetails": self.harness.charm.image.fetch(),
                    "imagePullPolicy": "Always",
                    "ports": [
                        {
                            "name": "mon",
                            "containerPort": 8000,
                            "protocol": "TCP",
                        }
                    ],
                    "envConfig": {
                        "ALLOW_ANONYMOUS_LOGIN": "yes",
                        "OSMMON_OPENSTACK_DEFAULT_GRANULARITY": 300,
                        "OSMMON_GLOBAL_REQUEST_TIMEOUT": 10,
                        "OSMMON_GLOBAL_LOGLEVEL": "INFO",
                        "OSMMON_COLLECTOR_INTERVAL": 30,
                        "OSMMON_EVALUATOR_INTERVAL": 30,
                        "OSMMON_MESSAGE_DRIVER": "kafka",
                        "OSMMON_MESSAGE_HOST": "kafka",
                        "OSMMON_MESSAGE_PORT": 9092,
                        "OSMMON_DATABASE_DRIVER": "mongo",
                        "OSMMON_DATABASE_URI": "mongodb://mongo:27017",
                        "OSMMON_DATABASE_COMMONKEY": "osm",
                        "OSMMON_PROMETHEUS_URL": "http://prometheus:9090",
                        "OSMMON_VCA_HOST": "admin",
                        "OSMMON_VCA_USER": "admin",
                        "OSMMON_VCA_SECRET": "secret",
                        "OSMMON_VCA_CACERT": "",
                    },
                }
            ],
            "kubernetesResources": {"ingressResources": []},
        }

        self.harness.charm.on.start.emit()

        # Check if kafka datastore is initialized
        self.assertIsNone(self.harness.charm.state.message_host)
        self.assertIsNone(self.harness.charm.state.message_port)

        # Check if mongodb datastore is initialized
        self.assertIsNone(self.harness.charm.state.database_uri)

        # Check if prometheus datastore is initialized
        self.assertIsNone(self.harness.charm.state.prometheus_host)
        self.assertIsNone(self.harness.charm.state.prometheus_port)

        # Initializing the kafka relation
        kafka_relation_id = self.harness.add_relation("kafka", "kafka")
        self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
        self.harness.update_relation_data(
            kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
        )

        # Initializing the mongo relation
        mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
        self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
        self.harness.update_relation_data(
            mongodb_relation_id,
            "mongodb/0",
            {"connection_string": "mongodb://mongo:27017"},
        )

        # Initializing the prometheus relation
        prometheus_relation_id = self.harness.add_relation("prometheus", "prometheus")
        self.harness.add_relation_unit(prometheus_relation_id, "prometheus/0")
        self.harness.update_relation_data(
            prometheus_relation_id,
            "prometheus/0",
            {"hostname": "prometheus", "port": 9090},
        )

        # Checking if kafka data is stored
        self.assertEqual(self.harness.charm.state.message_host, "kafka")
        self.assertEqual(self.harness.charm.state.message_port, 9092)

        # Checking if mongodb data is stored
        self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")

        # Checking if prometheus data is stored
        self.assertEqual(self.harness.charm.state.prometheus_host, "prometheus")
        self.assertEqual(self.harness.charm.state.prometheus_port, 9090)

        # Verifying status
        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)

        pod_spec, _ = self.harness.get_pod_spec()

        self.assertDictEqual(expected_result, pod_spec)

    def test_on_kafka_app_relation_changed(self) -> None:
        """Test to see if kafka relation is updated (app bucket)."""
        self.harness.charm.on.start.emit()

        self.assertIsNone(self.harness.charm.state.message_host)
        self.assertIsNone(self.harness.charm.state.message_port)

        relation_id = self.harness.add_relation("kafka", "kafka")
        self.harness.add_relation_unit(relation_id, "kafka/0")
        self.harness.update_relation_data(
            relation_id, "kafka", {"host": "kafka", "port": 9092}
        )

        self.assertEqual(self.harness.charm.state.message_host, "kafka")
        self.assertEqual(self.harness.charm.state.message_port, 9092)

        # Still blocked: mongodb and prometheus are missing.
        self._assert_blocked_waiting_for(
            missing=("mongodb", "prometheus"), present=("kafka",)
        )

    def test_on_kafka_unit_relation_changed(self) -> None:
        """Test to see if kafka relation is updated (unit bucket)."""
        self.harness.charm.on.start.emit()

        self.assertIsNone(self.harness.charm.state.message_host)
        self.assertIsNone(self.harness.charm.state.message_port)

        relation_id = self.harness.add_relation("kafka", "kafka")
        self.harness.add_relation_unit(relation_id, "kafka/0")
        self.harness.update_relation_data(
            relation_id, "kafka/0", {"host": "kafka", "port": 9092}
        )

        self.assertEqual(self.harness.charm.state.message_host, "kafka")
        self.assertEqual(self.harness.charm.state.message_port, 9092)

        self._assert_blocked_waiting_for(
            missing=("mongodb", "prometheus"), present=("kafka",)
        )

    def test_on_mongodb_app_relation_changed(self) -> None:
        """Test to see if mongodb relation is updated (app bucket)."""
        self.harness.charm.on.start.emit()

        self.assertIsNone(self.harness.charm.state.database_uri)

        relation_id = self.harness.add_relation("mongodb", "mongodb")
        self.harness.add_relation_unit(relation_id, "mongodb/0")
        self.harness.update_relation_data(
            relation_id, "mongodb", {"connection_string": "mongodb://mongo:27017"}
        )

        self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")

        self._assert_blocked_waiting_for(
            missing=("kafka", "prometheus"), present=("mongodb",)
        )

    def test_on_mongodb_unit_relation_changed(self) -> None:
        """Test to see if mongodb relation is updated (unit bucket)."""
        self.harness.charm.on.start.emit()

        self.assertIsNone(self.harness.charm.state.database_uri)

        relation_id = self.harness.add_relation("mongodb", "mongodb")
        self.harness.add_relation_unit(relation_id, "mongodb/0")
        self.harness.update_relation_data(
            relation_id, "mongodb/0", {"connection_string": "mongodb://mongo:27017"}
        )

        self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")

        self._assert_blocked_waiting_for(
            missing=("kafka", "prometheus"), present=("mongodb",)
        )

    def test_on_prometheus_app_relation_changed(self) -> None:
        """Test to see if prometheus relation is updated (app bucket)."""
        self.harness.charm.on.start.emit()

        self.assertIsNone(self.harness.charm.state.prometheus_host)
        self.assertIsNone(self.harness.charm.state.prometheus_port)

        relation_id = self.harness.add_relation("prometheus", "prometheus")
        self.harness.add_relation_unit(relation_id, "prometheus/0")
        self.harness.update_relation_data(
            relation_id, "prometheus", {"hostname": "prometheus", "port": 9090}
        )

        self.assertEqual(self.harness.charm.state.prometheus_host, "prometheus")
        self.assertEqual(self.harness.charm.state.prometheus_port, 9090)

        self._assert_blocked_waiting_for(
            missing=("kafka", "mongodb"), present=("prometheus",)
        )

    def test_on_prometheus_unit_relation_changed(self) -> None:
        """Test to see if prometheus relation is updated (unit bucket)."""
        self.harness.charm.on.start.emit()

        self.assertIsNone(self.harness.charm.state.prometheus_host)
        self.assertIsNone(self.harness.charm.state.prometheus_port)

        relation_id = self.harness.add_relation("prometheus", "prometheus")
        self.harness.add_relation_unit(relation_id, "prometheus/0")
        self.harness.update_relation_data(
            relation_id, "prometheus/0", {"hostname": "prometheus", "port": 9090}
        )

        self.assertEqual(self.harness.charm.state.prometheus_host, "prometheus")
        self.assertEqual(self.harness.charm.state.prometheus_port, 9090)

        self._assert_blocked_waiting_for(
            missing=("kafka", "mongodb"), present=("prometheus",)
        )


if __name__ == "__main__":
    unittest.main()
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: legal@canonical.com +# +# To get in touch with the maintainers, please contact: +# osm-charmers@lists.launchpad.net +## + +from pydantic import ValidationError +from typing import NoReturn +import unittest + +import pod_spec + + +class TestPodSpec(unittest.TestCase): + """Pod spec unit tests.""" + + def test_make_pod_ports(self) -> NoReturn: + """Testing make pod ports.""" + port = 8000 + + expected_result = [ + { + "name": "mon", + "containerPort": port, + "protocol": "TCP", + } + ] + + pod_ports = pod_spec._make_pod_ports(port) + + self.assertListEqual(expected_result, pod_ports) + + def test_make_pod_envconfig(self) -> NoReturn: + """Testing make pod envconfig.""" + config = { + "openstack_default_granularity": 300, + "global_request_timeout": 10, + "log_level": "INFO", + "database_commonkey": "osm", + "collector_interval": 30, + "evaluator_interval": 30, + "vca_host": "admin", + "vca_user": "admin", + "vca_password": "secret", + "vca_cacert": "", + } + relation_state = { + "message_host": "kafka", + "message_port": 9090, + "database_uri": "mongodb://mongo", + "prometheus_host": "prometheus", + "prometheus_port": 9082, + } + + expected_result = { + "ALLOW_ANONYMOUS_LOGIN": "yes", + "OSMMON_OPENSTACK_DEFAULT_GRANULARITY": config[ + "openstack_default_granularity" + ], + "OSMMON_GLOBAL_REQUEST_TIMEOUT": 
config["global_request_timeout"], + "OSMMON_GLOBAL_LOGLEVEL": config["log_level"], + "OSMMON_COLLECTOR_INTERVAL": config["collector_interval"], + "OSMMON_EVALUATOR_INTERVAL": config["evaluator_interval"], + "OSMMON_MESSAGE_DRIVER": "kafka", + "OSMMON_MESSAGE_HOST": relation_state["message_host"], + "OSMMON_MESSAGE_PORT": relation_state["message_port"], + "OSMMON_DATABASE_DRIVER": "mongo", + "OSMMON_DATABASE_URI": relation_state["database_uri"], + "OSMMON_DATABASE_COMMONKEY": config["database_commonkey"], + "OSMMON_PROMETHEUS_URL": f"http://{relation_state['prometheus_host']}:{relation_state['prometheus_port']}", + "OSMMON_VCA_HOST": config["vca_host"], + "OSMMON_VCA_USER": config["vca_user"], + "OSMMON_VCA_SECRET": config["vca_password"], + "OSMMON_VCA_CACERT": config["vca_cacert"], + } + + pod_envconfig = pod_spec._make_pod_envconfig(config, relation_state) + + self.assertDictEqual(expected_result, pod_envconfig) + + def test_make_startup_probe(self) -> NoReturn: + """Testing make startup probe.""" + expected_result = { + "exec": {"command": ["/usr/bin/pgrep python3"]}, + "initialDelaySeconds": 60, + "timeoutSeconds": 5, + } + + startup_probe = pod_spec._make_startup_probe() + + self.assertDictEqual(expected_result, startup_probe) + + def test_make_readiness_probe(self) -> NoReturn: + """Testing make readiness probe.""" + port = 8000 + + expected_result = { + "tcpSocket": { + "port": port, + }, + "periodSeconds": 10, + "timeoutSeconds": 5, + "successThreshold": 1, + "failureThreshold": 3, + } + + readiness_probe = pod_spec._make_readiness_probe(port) + + self.assertDictEqual(expected_result, readiness_probe) + + def test_make_liveness_probe(self) -> NoReturn: + """Testing make liveness probe.""" + port = 8000 + + expected_result = { + "tcpSocket": { + "port": port, + }, + "initialDelaySeconds": 45, + "periodSeconds": 10, + "timeoutSeconds": 5, + "successThreshold": 1, + "failureThreshold": 3, + } + + liveness_probe = pod_spec._make_liveness_probe(port) + + 
self.assertDictEqual(expected_result, liveness_probe) + + def test_make_pod_spec(self) -> NoReturn: + """Testing make pod spec.""" + image_info = {"upstream-source": "opensourcemano/mon:8"} + config = { + "site_url": "", + "openstack_default_granularity": 300, + "global_request_timeout": 10, + "log_level": "INFO", + "database_commonkey": "osm", + "collector_interval": 30, + "evaluator_interval": 30, + "vca_host": "admin", + "vca_user": "admin", + "vca_password": "secret", + "vca_cacert": "", + } + relation_state = { + "message_host": "kafka", + "message_port": 9090, + "database_uri": "mongodb://mongo", + "prometheus_host": "prometheus", + "prometheus_port": 9082, + } + app_name = "mon" + port = 8000 + + expected_result = { + "version": 3, + "containers": [ + { + "name": app_name, + "imageDetails": image_info, + "imagePullPolicy": "Always", + "ports": [ + { + "name": app_name, + "containerPort": port, + "protocol": "TCP", + } + ], + "envConfig": { + "ALLOW_ANONYMOUS_LOGIN": "yes", + "OSMMON_OPENSTACK_DEFAULT_GRANULARITY": config[ + "openstack_default_granularity" + ], + "OSMMON_GLOBAL_REQUEST_TIMEOUT": config[ + "global_request_timeout" + ], + "OSMMON_GLOBAL_LOGLEVEL": config["log_level"], + "OSMMON_COLLECTOR_INTERVAL": config["collector_interval"], + "OSMMON_EVALUATOR_INTERVAL": config["evaluator_interval"], + "OSMMON_MESSAGE_DRIVER": "kafka", + "OSMMON_MESSAGE_HOST": relation_state["message_host"], + "OSMMON_MESSAGE_PORT": relation_state["message_port"], + "OSMMON_DATABASE_DRIVER": "mongo", + "OSMMON_DATABASE_URI": relation_state["database_uri"], + "OSMMON_DATABASE_COMMONKEY": config["database_commonkey"], + "OSMMON_PROMETHEUS_URL": f"http://{relation_state['prometheus_host']}:{relation_state['prometheus_port']}", + "OSMMON_VCA_HOST": config["vca_host"], + "OSMMON_VCA_USER": config["vca_user"], + "OSMMON_VCA_SECRET": config["vca_password"], + "OSMMON_VCA_CACERT": config["vca_cacert"], + }, + } + ], + "kubernetesResources": {"ingressResources": []}, + } + + spec = 
pod_spec.make_pod_spec( + image_info, config, relation_state, app_name, port + ) + + self.assertDictEqual(expected_result, spec) + + def test_make_pod_spec_without_image_info(self) -> NoReturn: + """Testing make pod spec without image_info.""" + image_info = None + config = { + "site_url": "", + "openstack_default_granularity": 300, + "global_request_timeout": 10, + "log_level": "INFO", + "database_commonkey": "osm", + "collector_interval": 30, + "evaluator_interval": 30, + "vca_host": "admin", + "vca_user": "admin", + "vca_password": "secret", + "vca_cacert": "", + } + relation_state = { + "message_host": "kafka", + "message_port": 9090, + "database_uri": "mongodb://mongo", + "prometheus_host": "prometheus", + "prometheus_port": 9082, + } + app_name = "mon" + port = 8000 + + spec = pod_spec.make_pod_spec( + image_info, config, relation_state, app_name, port + ) + + self.assertIsNone(spec) + + def test_make_pod_spec_without_config(self) -> NoReturn: + """Testing make pod spec without config.""" + image_info = {"upstream-source": "opensourcemano/mon:8"} + config = {} + relation_state = { + "message_host": "kafka", + "message_port": 9090, + "database_uri": "mongodb://mongo", + "prometheus_host": "prometheus", + "prometheus_port": 9082, + } + app_name = "mon" + port = 8000 + + with self.assertRaises(ValidationError): + pod_spec.make_pod_spec(image_info, config, relation_state, app_name, port) + + def test_make_pod_spec_without_relation_state(self) -> NoReturn: + """Testing make pod spec without relation_state.""" + image_info = {"upstream-source": "opensourcemano/mon:8"} + config = { + "site_url": "", + "openstack_default_granularity": 300, + "global_request_timeout": 10, + "log_level": "INFO", + "database_commonkey": "osm", + "collector_interval": 30, + "evaluator_interval": 30, + "vca_host": "admin", + "vca_user": "admin", + "vca_password": "secret", + "vca_cacert": "", + } + relation_state = {} + app_name = "mon" + port = 8000 + + with 
self.assertRaises(ValidationError): + pod_spec.make_pod_spec(image_info, config, relation_state, app_name, port) + + +if __name__ == "__main__": + unittest.main() diff --git a/installers/charm/mon/tox.ini b/installers/charm/mon/tox.ini new file mode 100644 index 00000000..7ab02dd0 --- /dev/null +++ b/installers/charm/mon/tox.ini @@ -0,0 +1,82 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# For those usages not covered by the Apache License, Version 2.0 please +# contact: legal@canonical.com +# +# To get in touch with the maintainers, please contact: +# osm-charmers@lists.launchpad.net +## + +[tox] +skipsdist = True +envlist = unit, lint +sitepackages = False +skip_missing_interpreters = False + +[testenv] +basepython = python3 +setenv = + PYTHONHASHSEED=0 + PYTHONPATH = {toxinidir}/src + CHARM_NAME = mon + +[testenv:build] +passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY +whitelist_externals = + charmcraft + rm + unzip +commands = + rm -rf release mon.charm + charmcraft build + unzip mon.charm -d release + +[testenv:unit] +commands = + coverage erase + stestr run --slowest --test-path=./tests --top-dir=./ + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report +deps = + coverage + stestr + mock + ops + pydantic +setenv = + {[testenv]setenv} + PYTHON=coverage run + +[testenv:lint] +deps = + black + yamllint + flake8 +commands = + black --check --diff . 
--exclude "build/|.tox/|mod/|lib/" + yamllint . + flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/,.tox/,mod/,lib/" + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . +omit = + .tox/* + tests/*