This refactoring work includes tests.
Note 1: the old charm is kept in the nbi-k8s folder.
Note 2: relation-departed is not yet tested because the Testing
Harness has no function to remove a relation. An issue is open and
the Charmcraft team should provide feedback soon.
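
A possible interim sketch (not part of this change; it uses a mock
object as a stand-in event, since the Harness has no removal helper
yet) is to invoke the departed handlers directly from a unit test:

    from unittest import mock
    self.harness.charm._on_kafka_relation_departed(mock.Mock())
    self.assertIsNone(self.harness.charm.state.message_host)
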
Change-Id: I25b94d205d2a004946189a231b5309da1deaa8ed
Signed-off-by: sousaedu <eduardo.sousa@canonical.com>
--- /dev/null
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
+.vscode
+build
+nbi.charm
+.coverage
+.stestr
+cover
\ No newline at end of file
--- /dev/null
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+---
+extends: default
+
+yaml-files:
+ - "*.yaml"
+ - "*.yml"
+ - ".yamllint"
+ignore: |
+ .tox
+ build/
+ mod/
+ lib/
--- /dev/null
+<!-- Copyright 2020 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License. You may obtain
+a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+
+For those usages not covered by the Apache License, Version 2.0 please
+contact: legal@canonical.com
+
+To get in touch with the maintainers, please contact:
+osm-charmers@lists.launchpad.net -->
+
+# NBI operator Charm for Kubernetes
+
+## Requirements
\ No newline at end of file
--- /dev/null
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+options:
+ max_file_size:
+ type: int
+ description: |
+      The maximum file size, in megabytes. If there is a reverse proxy in front
+      of NBI, it may need to be configured to handle the requested size.
+ Note: if set to 0, there is no limit.
+ default: 0
+ ingress_whitelist_source_range:
+ type: string
+ description: |
+ A comma-separated list of CIDRs to store in the
+ ingress.kubernetes.io/whitelist-source-range annotation.
+
+      This can be used to lock down access to
+      NBI based on source IP address.
+ default: ""
+ tls_secret_name:
+ type: string
+ description: TLS Secret name
+ default: ""
+ site_url:
+ type: string
+ description: Ingress URL
+ default: ""
+ log_level:
+ description: "Log Level"
+ type: string
+ default: "INFO"
+ database_commonkey:
+ description: Database COMMON KEY
+ type: string
+ default: osm
+ auth_backend:
+ type: string
+ description: Authentication backend ('internal' or 'keystone')
+ default: internal
+ enable_test:
+ type: boolean
+ description: Enable test endpoints of NBI.
+ default: false
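+
+# Example usage (illustrative only; assumes the application is deployed as "nbi"):
+#   juju config nbi auth_backend=keystone site_url=https://nbi.example.com \
+#     max_file_size=25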
--- /dev/null
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+name: nbi
+summary: OSM Northbound Interface (NBI)
+description: |
+ A CAAS charm to deploy OSM's Northbound Interface (NBI).
+series:
+ - kubernetes
+tags:
+ - kubernetes
+ - osm
+ - nbi
+min-juju-version: 2.8.0
+deployment:
+ type: stateless
+ service: cluster
+resources:
+ image:
+ type: oci-image
+ description: OSM docker image for NBI
+ upstream-source: "opensourcemano/nbi:8"
+requires:
+ kafka:
+ interface: kafka
+ mongodb:
+ interface: mongodb
+ keystone:
+ interface: keystone
+ prometheus:
+ interface: prometheus
+provides:
+ nbi:
+ interface: osm-nbi
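+
+# Deployment sketch (illustrative application names; not part of the metadata):
+#   juju deploy ./nbi.charm --resource image=opensourcemano/nbi:8
+#   juju add-relation nbi kafka
+#   juju add-relation nbi mongodb
+#   juju add-relation nbi prometheus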
--- /dev/null
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+ops
+pydantic
+git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
--- /dev/null
+#!/usr/bin/env python3
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+import logging
+from typing import Any, Dict, NoReturn
+from pydantic import ValidationError
+
+from ops.charm import CharmBase, CharmEvents
+from ops.framework import EventBase, EventSource, StoredState
+from ops.main import main
+from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus
+from oci_image import OCIImageResource, OCIImageResourceError
+
+from pod_spec import make_pod_spec
+
+LOGGER = logging.getLogger(__name__)
+
+NBI_PORT = 9999
+
+
+class ConfigurePodEvent(EventBase):
+ """Configure Pod event"""
+
+ pass
+
+
+class NbiEvents(CharmEvents):
+ """NBI Events"""
+
+ configure_pod = EventSource(ConfigurePodEvent)
+
+
+class NbiCharm(CharmBase):
+ """NBI Charm."""
+
+ state = StoredState()
+ on = NbiEvents()
+
+ def __init__(self, *args) -> NoReturn:
+ """NBI Charm constructor."""
+ super().__init__(*args)
+
+ # Internal state initialization
+ self.state.set_default(pod_spec=None)
+
+ # Message bus data initialization
+ self.state.set_default(message_host=None)
+ self.state.set_default(message_port=None)
+
+ # Database data initialization
+ self.state.set_default(database_uri=None)
+
+ # Prometheus data initialization
+ self.state.set_default(prometheus_host=None)
+ self.state.set_default(prometheus_port=None)
+
+ # Keystone data initialization
+ self.state.set_default(keystone_host=None)
+ self.state.set_default(keystone_port=None)
+ self.state.set_default(keystone_user_domain_name=None)
+ self.state.set_default(keystone_project_domain_name=None)
+ self.state.set_default(keystone_username=None)
+ self.state.set_default(keystone_password=None)
+ self.state.set_default(keystone_service=None)
+
+ self.port = NBI_PORT
+ self.image = OCIImageResource(self, "image")
+
+ # Registering regular events
+ self.framework.observe(self.on.start, self.configure_pod)
+ self.framework.observe(self.on.config_changed, self.configure_pod)
+ self.framework.observe(self.on.upgrade_charm, self.configure_pod)
+
+ # Registering custom internal events
+ self.framework.observe(self.on.configure_pod, self.configure_pod)
+
+ # Registering required relation changed events
+ self.framework.observe(
+ self.on.kafka_relation_changed, self._on_kafka_relation_changed
+ )
+ self.framework.observe(
+ self.on.mongodb_relation_changed, self._on_mongodb_relation_changed
+ )
+ self.framework.observe(
+ self.on.keystone_relation_changed, self._on_keystone_relation_changed
+ )
+ self.framework.observe(
+ self.on.prometheus_relation_changed, self._on_prometheus_relation_changed
+ )
+
+ # Registering required relation departed events
+ self.framework.observe(
+ self.on.kafka_relation_departed, self._on_kafka_relation_departed
+ )
+ self.framework.observe(
+ self.on.mongodb_relation_departed, self._on_mongodb_relation_departed
+ )
+ self.framework.observe(
+ self.on.keystone_relation_departed, self._on_keystone_relation_departed
+ )
+ self.framework.observe(
+ self.on.prometheus_relation_departed, self._on_prometheus_relation_departed
+ )
+
+ # Registering provided relation events
+ self.framework.observe(self.on.nbi_relation_joined, self._publish_nbi_info)
+
+ def _on_kafka_relation_changed(self, event: EventBase) -> NoReturn:
+ """Reads information about the kafka relation.
+
+ Args:
+ event (EventBase): Kafka relation event.
+ """
+ message_host = event.relation.data[event.unit].get("host")
+ message_port = event.relation.data[event.unit].get("port")
+
+ if (
+ message_host
+ and message_port
+ and (
+ self.state.message_host != message_host
+ or self.state.message_port != message_port
+ )
+ ):
+ self.state.message_host = message_host
+ self.state.message_port = message_port
+ self.on.configure_pod.emit()
+
+ def _on_kafka_relation_departed(self, event: EventBase) -> NoReturn:
+ """Clears data from kafka relation.
+
+ Args:
+ event (EventBase): Kafka relation event.
+ """
+ self.state.message_host = None
+ self.state.message_port = None
+ self.on.configure_pod.emit()
+
+ def _on_mongodb_relation_changed(self, event: EventBase) -> NoReturn:
+ """Reads information about the DB relation.
+
+ Args:
+ event (EventBase): DB relation event.
+ """
+ database_uri = event.relation.data[event.unit].get("connection_string")
+
+ if database_uri and self.state.database_uri != database_uri:
+ self.state.database_uri = database_uri
+ self.on.configure_pod.emit()
+
+ def _on_mongodb_relation_departed(self, event: EventBase) -> NoReturn:
+ """Clears data from mongodb relation.
+
+ Args:
+ event (EventBase): DB relation event.
+ """
+ self.state.database_uri = None
+ self.on.configure_pod.emit()
+
+ def _on_keystone_relation_changed(self, event: EventBase) -> NoReturn:
+ """Reads information about the keystone relation.
+
+ Args:
+ event (EventBase): Keystone relation event.
+ """
+ keystone_host = event.relation.data[event.unit].get("host")
+ keystone_port = event.relation.data[event.unit].get("port")
+ keystone_user_domain_name = event.relation.data[event.unit].get(
+ "user_domain_name"
+ )
+ keystone_project_domain_name = event.relation.data[event.unit].get(
+ "project_domain_name"
+ )
+ keystone_username = event.relation.data[event.unit].get("username")
+ keystone_password = event.relation.data[event.unit].get("password")
+ keystone_service = event.relation.data[event.unit].get("service")
+
+ if (
+ keystone_host
+ and keystone_port
+ and keystone_user_domain_name
+ and keystone_project_domain_name
+ and keystone_username
+ and keystone_password
+ and keystone_service
+ and (
+ self.state.keystone_host != keystone_host
+ or self.state.keystone_port != keystone_port
+ or self.state.keystone_user_domain_name != keystone_user_domain_name
+ or self.state.keystone_project_domain_name
+ != keystone_project_domain_name
+ or self.state.keystone_username != keystone_username
+ or self.state.keystone_password != keystone_password
+ or self.state.keystone_service != keystone_service
+ )
+ ):
+ self.state.keystone_host = keystone_host
+ self.state.keystone_port = keystone_port
+ self.state.keystone_user_domain_name = keystone_user_domain_name
+ self.state.keystone_project_domain_name = keystone_project_domain_name
+ self.state.keystone_username = keystone_username
+ self.state.keystone_password = keystone_password
+ self.state.keystone_service = keystone_service
+ self.on.configure_pod.emit()
+
+ def _on_keystone_relation_departed(self, event: EventBase) -> NoReturn:
+ """Clears data from keystone relation.
+
+ Args:
+ event (EventBase): Keystone relation event.
+ """
+ self.state.keystone_host = None
+ self.state.keystone_port = None
+ self.state.keystone_user_domain_name = None
+ self.state.keystone_project_domain_name = None
+ self.state.keystone_username = None
+ self.state.keystone_password = None
+ self.state.keystone_service = None
+ self.on.configure_pod.emit()
+
+ def _on_prometheus_relation_changed(self, event: EventBase) -> NoReturn:
+ """Reads information about the prometheus relation.
+
+ Args:
+ event (EventBase): Prometheus relation event.
+ """
+ prometheus_host = event.relation.data[event.unit].get("hostname")
+ prometheus_port = event.relation.data[event.unit].get("port")
+
+ if (
+ prometheus_host
+ and prometheus_port
+ and (
+ self.state.prometheus_host != prometheus_host
+ or self.state.prometheus_port != prometheus_port
+ )
+ ):
+ self.state.prometheus_host = prometheus_host
+ self.state.prometheus_port = prometheus_port
+ self.on.configure_pod.emit()
+
+ def _on_prometheus_relation_departed(self, event: EventBase) -> NoReturn:
+ """Clears data from prometheus relation.
+
+ Args:
+ event (EventBase): Prometheus relation event.
+ """
+ self.state.prometheus_host = None
+ self.state.prometheus_port = None
+ self.on.configure_pod.emit()
+
+ def _publish_nbi_info(self, event: EventBase) -> NoReturn:
+ """Publishes NBI information.
+
+ Args:
+ event (EventBase): NBI relation event.
+ """
+ if self.unit.is_leader():
+ rel_data = {
+ "host": self.model.app.name,
+ "port": str(NBI_PORT),
+ }
+ for k, v in rel_data.items():
+ event.relation.data[self.model.app][k] = v
+
+ def _missing_relations(self) -> str:
+ """Checks if there missing relations.
+
+ Returns:
+ str: string with missing relations
+ """
+ data_status = {
+ "kafka": self.state.message_host,
+ "mongodb": self.state.database_uri,
+ "prometheus": self.state.prometheus_host,
+ }
+
+ if self.model.config["auth_backend"] == "keystone":
+ data_status["keystone"] = self.state.keystone_host
+
+ missing_relations = [k for k, v in data_status.items() if not v]
+
+ return ", ".join(missing_relations)
+
+ @property
+ def relation_state(self) -> Dict[str, Any]:
+ """Collects relation state configuration for pod spec assembly.
+
+ Returns:
+ Dict[str, Any]: relation state information.
+ """
+ relation_state = {
+ "message_host": self.state.message_host,
+ "message_port": self.state.message_port,
+ "database_uri": self.state.database_uri,
+ "prometheus_host": self.state.prometheus_host,
+ "prometheus_port": self.state.prometheus_port,
+ }
+
+ if self.model.config["auth_backend"] == "keystone":
+ relation_state.update(
+ {
+ "keystone_host": self.state.keystone_host,
+ "keystone_port": self.state.keystone_port,
+ "keystone_user_domain_name": self.state.keystone_user_domain_name,
+ "keystone_project_domain_name": self.state.keystone_project_domain_name,
+ "keystone_username": self.state.keystone_username,
+ "keystone_password": self.state.keystone_password,
+ "keystone_service": self.state.keystone_service,
+ }
+ )
+
+ return relation_state
+
+ def configure_pod(self, event: EventBase) -> NoReturn:
+ """Assemble the pod spec and apply it, if possible.
+
+ Args:
+ event (EventBase): Hook or Relation event that started the
+ function.
+ """
+ if missing := self._missing_relations():
+ self.unit.status = BlockedStatus(
+ f"Waiting for {missing} relation{'s' if ',' in missing else ''}"
+ )
+ return
+
+ if not self.unit.is_leader():
+ self.unit.status = ActiveStatus("ready")
+ return
+
+ self.unit.status = MaintenanceStatus("Assembling pod spec")
+
+ # Fetch image information
+ try:
+ self.unit.status = MaintenanceStatus("Fetching image information")
+ image_info = self.image.fetch()
+ except OCIImageResourceError:
+ self.unit.status = BlockedStatus("Error fetching image information")
+ return
+
+ try:
+ pod_spec = make_pod_spec(
+ image_info,
+ self.model.config,
+ self.relation_state,
+ self.model.app.name,
+ self.port,
+ )
+ except ValidationError as exc:
+ self.unit.status = BlockedStatus(str(exc))
+ return
+
+ if self.state.pod_spec != pod_spec:
+ self.model.pod.set_spec(pod_spec)
+ self.state.pod_spec = pod_spec
+
+ self.unit.status = ActiveStatus("ready")
+
+
+if __name__ == "__main__":
+ main(NbiCharm)
--- /dev/null
+#!/usr/bin/env python3
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+import logging
+from pydantic import (
+ BaseModel,
+ conint,
+ constr,
+ IPvAnyNetwork,
+ PositiveInt,
+ validator,
+ ValidationError,
+)
+from typing import Any, Dict, List, Optional
+from urllib.parse import urlparse
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigData(BaseModel):
+ """Configuration data model."""
+
+ enable_test: bool
+ database_commonkey: constr(min_length=1)
+ log_level: constr(regex=r"^(INFO|DEBUG)$")
+ auth_backend: constr(regex=r"^(internal|keystone)$")
+ site_url: Optional[str]
+ max_file_size: Optional[conint(ge=0)]
+ ingress_whitelist_source_range: Optional[IPvAnyNetwork]
+ tls_secret_name: Optional[str]
+
+ @validator("max_file_size", pre=True, always=True)
+ def validate_max_file_size(cls, value, values, **kwargs):
+ site_url = values.get("site_url")
+
+ if not site_url:
+ return value
+
+ parsed = urlparse(site_url)
+
+ if not parsed.scheme.startswith("http"):
+ return value
+
+ if value is None:
+ raise ValueError("max_file_size needs to be defined if site_url is defined")
+
+ return value
+
+ @validator("ingress_whitelist_source_range", pre=True, always=True)
+ def validate_ingress_whitelist_source_range(cls, value, values, **kwargs):
+ if not value:
+ return None
+
+ return value
+
+
+class RelationData(BaseModel):
+ """Relation data model."""
+
+ message_host: str
+ message_port: PositiveInt
+ database_uri: constr(regex=r"^(mongo://)")
+ prometheus_host: str
+ prometheus_port: PositiveInt
+ keystone: bool
+ keystone_host: Optional[constr(min_length=1)]
+ keystone_port: Optional[PositiveInt]
+ keystone_user_domain_name: Optional[constr(min_length=1)]
+ keystone_project_domain_name: Optional[constr(min_length=1)]
+ keystone_username: Optional[constr(min_length=1)]
+ keystone_password: Optional[constr(min_length=1)]
+ keystone_service: Optional[constr(min_length=1)]
+
+ @validator("keystone_host", pre=True, always=True)
+ def validate_keystone_host(cls, value, values, **kwargs):
+ keystone = values.get("keystone")
+
+ if not keystone:
+ return value
+
+ if value is None:
+ raise ValueError(
+ "keystone_host needs to be defined if keystone is configured"
+ )
+
+ return value
+
+ @validator("keystone_port", pre=True, always=True)
+ def validate_keystone_port(cls, value, values, **kwargs):
+ keystone = values.get("keystone")
+
+ if not keystone:
+ return value
+
+ if value is None:
+ raise ValueError(
+ "keystone_port needs to be defined if keystone is configured"
+ )
+
+ return value
+
+ @validator("keystone_user_domain_name", pre=True, always=True)
+ def validate_keystone_user_domain_name(cls, value, values, **kwargs):
+ keystone = values.get("keystone")
+
+ if not keystone:
+ return value
+
+ if value is None:
+ raise ValueError(
+ "keystone_user_domain_name needs to be defined if keystone is configured"
+ )
+
+ return value
+
+ @validator("keystone_project_domain_name", pre=True, always=True)
+ def validate_keystone_project_domain_name(cls, value, values, **kwargs):
+ keystone = values.get("keystone")
+
+ if not keystone:
+ return value
+
+ if value is None:
+ raise ValueError(
+ "keystone_project_domain_name needs to be defined if keystone is configured"
+ )
+
+ return value
+
+ @validator("keystone_username", pre=True, always=True)
+ def validate_keystone_username(cls, value, values, **kwargs):
+ keystone = values.get("keystone")
+
+ if not keystone:
+ return value
+
+ if value is None:
+ raise ValueError(
+ "keystone_username needs to be defined if keystone is configured"
+ )
+
+ return value
+
+ @validator("keystone_password", pre=True, always=True)
+ def validate_keystone_password(cls, value, values, **kwargs):
+ keystone = values.get("keystone")
+
+ if not keystone:
+ return value
+
+ if value is None:
+ raise ValueError(
+ "keystone_password needs to be defined if keystone is configured"
+ )
+
+ return value
+
+ @validator("keystone_service", pre=True, always=True)
+ def validate_keystone_service(cls, value, values, **kwargs):
+ keystone = values.get("keystone")
+
+ if not keystone:
+ return value
+
+ if value is None:
+ raise ValueError(
+ "keystone_service needs to be defined if keystone is configured"
+ )
+
+ return value
+
+
+def _make_pod_ports(port: int) -> List[Dict[str, Any]]:
+ """Generate pod ports details.
+
+ Args:
+ port (int): port to expose.
+
+ Returns:
+ List[Dict[str, Any]]: pod port details.
+ """
+ return [{"name": "nbi", "containerPort": port, "protocol": "TCP"}]
+
+
+def _make_pod_envconfig(
+ config: Dict[str, Any], relation_state: Dict[str, Any]
+) -> Dict[str, Any]:
+ """Generate pod environment configuration.
+
+ Args:
+ config (Dict[str, Any]): configuration information.
+ relation_state (Dict[str, Any]): relation state information.
+
+ Returns:
+ Dict[str, Any]: pod environment configuration.
+ """
+ envconfig = {
+ # General configuration
+ "ALLOW_ANONYMOUS_LOGIN": "yes",
+ "OSMNBI_SERVER_ENABLE_TEST": config["enable_test"],
+ "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
+ # Kafka configuration
+ "OSMNBI_MESSAGE_HOST": relation_state["message_host"],
+ "OSMNBI_MESSAGE_DRIVER": "kafka",
+ "OSMNBI_MESSAGE_PORT": relation_state["message_port"],
+ # Database configuration
+ "OSMNBI_DATABASE_DRIVER": "mongo",
+ "OSMNBI_DATABASE_URI": relation_state["database_uri"],
+ "OSMNBI_DATABASE_COMMONKEY": config["database_commonkey"],
+ # Storage configuration
+ "OSMNBI_STORAGE_DRIVER": "mongo",
+ "OSMNBI_STORAGE_PATH": "/app/storage",
+ "OSMNBI_STORAGE_COLLECTION": "files",
+ "OSMNBI_STORAGE_URI": relation_state["database_uri"],
+ # Prometheus configuration
+ "OSMNBI_PROMETHEUS_HOST": relation_state["prometheus_host"],
+ "OSMNBI_PROMETHEUS_PORT": relation_state["prometheus_port"],
+ # Log configuration
+ "OSMNBI_LOG_LEVEL": config["log_level"],
+ }
+
+ if config["auth_backend"] == "internal":
+ envconfig["OSMNBI_AUTHENTICATION_BACKEND"] = "internal"
+ elif config["auth_backend"] == "keystone":
+ envconfig.update(
+ {
+ "OSMNBI_AUTHENTICATION_BACKEND": "keystone",
+ "OSMNBI_AUTHENTICATION_AUTH_URL": relation_state["keystone_host"],
+ "OSMNBI_AUTHENTICATION_AUTH_PORT": relation_state["keystone_port"],
+ "OSMNBI_AUTHENTICATION_USER_DOMAIN_NAME": relation_state[
+ "keystone_user_domain_name"
+ ],
+ "OSMNBI_AUTHENTICATION_PROJECT_DOMAIN_NAME": relation_state[
+ "keystone_project_domain_name"
+ ],
+ "OSMNBI_AUTHENTICATION_SERVICE_USERNAME": relation_state[
+ "keystone_username"
+ ],
+ "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD": relation_state[
+ "keystone_password"
+ ],
+ "OSMNBI_AUTHENTICATION_SERVICE_PROJECT": relation_state[
+ "keystone_service"
+ ],
+ }
+ )
+ else:
+ raise ValueError("auth_backend needs to be either internal or keystone")
+
+ return envconfig
+
+
+def _make_pod_ingress_resources(
+ config: Dict[str, Any], app_name: str, port: int
+) -> List[Dict[str, Any]]:
+ """Generate pod ingress resources.
+
+ Args:
+ config (Dict[str, Any]): configuration information.
+ app_name (str): application name.
+ port (int): port to expose.
+
+ Returns:
+ List[Dict[str, Any]]: pod ingress resources.
+ """
+ site_url = config.get("site_url")
+
+ if not site_url:
+ return
+
+ parsed = urlparse(site_url)
+
+ if not parsed.scheme.startswith("http"):
+ return
+
+ max_file_size = config["max_file_size"]
+ ingress_whitelist_source_range = config["ingress_whitelist_source_range"]
+
+ annotations = {
+ "nginx.ingress.kubernetes.io/proxy-body-size": "{}".format(
+            str(max_file_size) + "m" if max_file_size > 0 else max_file_size
+ )
+ }
+
+ if ingress_whitelist_source_range:
+ annotations[
+ "nginx.ingress.kubernetes.io/whitelist-source-range"
+ ] = ingress_whitelist_source_range
+
+ ingress_spec_tls = None
+
+ if parsed.scheme == "https":
+ ingress_spec_tls = [{"hosts": [parsed.hostname]}]
+ tls_secret_name = config["tls_secret_name"]
+ if tls_secret_name:
+ ingress_spec_tls[0]["secretName"] = tls_secret_name
+ else:
+ annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
+
+ ingress = {
+ "name": "{}-ingress".format(app_name),
+ "annotations": annotations,
+ "spec": {
+ "rules": [
+ {
+ "host": parsed.hostname,
+ "http": {
+ "paths": [
+ {
+ "path": "/",
+ "backend": {
+ "serviceName": app_name,
+ "servicePort": port,
+ },
+ }
+ ]
+ },
+ }
+ ]
+ },
+ }
+ if ingress_spec_tls:
+ ingress["spec"]["tls"] = ingress_spec_tls
+
+ return [ingress]
+
+
+def _make_startup_probe() -> Dict[str, Any]:
+ """Generate startup probe.
+
+ Returns:
+ Dict[str, Any]: startup probe.
+ """
+ return {
+ "exec": {"command": ["/usr/bin/pgrep python3"]},
+ "initialDelaySeconds": 60,
+ "timeoutSeconds": 5,
+ }
+
+
+def _make_readiness_probe(port: int) -> Dict[str, Any]:
+ """Generate readiness probe.
+
+ Args:
+        port (int): port used for the HTTP readiness check.
+
+ Returns:
+ Dict[str, Any]: readiness probe.
+ """
+ return {
+ "httpGet": {
+ "path": "/osm/",
+ "port": port,
+ },
+ "initialDelaySeconds": 45,
+ "timeoutSeconds": 5,
+ }
+
+
+def _make_liveness_probe(port: int) -> Dict[str, Any]:
+ """Generate liveness probe.
+
+ Args:
+        port (int): port used for the HTTP liveness check.
+
+ Returns:
+ Dict[str, Any]: liveness probe.
+ """
+ return {
+ "httpGet": {
+ "path": "/osm/",
+ "port": port,
+ },
+ "initialDelaySeconds": 45,
+ "timeoutSeconds": 5,
+ }
+
+
+def make_pod_spec(
+ image_info: Dict[str, str],
+ config: Dict[str, Any],
+ relation_state: Dict[str, Any],
+ app_name: str = "nbi",
+ port: int = 9999,
+) -> Dict[str, Any]:
+ """Generate the pod spec information.
+
+ Args:
+ image_info (Dict[str, str]): Object provided by
+ OCIImageResource("image").fetch().
+ config (Dict[str, Any]): Configuration information.
+ relation_state (Dict[str, Any]): Relation state information.
+ app_name (str, optional): Application name. Defaults to "nbi".
+ port (int, optional): Port for the container. Defaults to 9999.
+
+ Returns:
+ Dict[str, Any]: Pod spec dictionary for the charm.
+ """
+ if not image_info:
+ return None
+
+    # Validate config and relation data; raises ValidationError on bad input
+    ConfigData(**config)
+    RelationData(
+        **relation_state,
+        keystone=config.get("auth_backend") == "keystone",
+    )
+
+ ports = _make_pod_ports(port)
+ env_config = _make_pod_envconfig(config, relation_state)
+ ingress_resources = _make_pod_ingress_resources(config, app_name, port)
+
+ return {
+ "version": 3,
+ "containers": [
+ {
+ "name": app_name,
+ "imageDetails": image_info,
+ "imagePullPolicy": "Always",
+ "ports": ports,
+ "envConfig": env_config,
+ }
+ ],
+ "kubernetesResources": {
+ "ingressResources": ingress_resources or [],
+ },
+ }
--- /dev/null
+#!/usr/bin/env python3
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+"""Init mocking for unit tests."""
+
+import sys
+import mock
+
+sys.path.append("src")
+
+oci_image = mock.MagicMock()
+sys.modules["oci_image"] = oci_image
--- /dev/null
+#!/usr/bin/env python3
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+from typing import NoReturn
+import unittest
+from ops.model import BlockedStatus
+
+from ops.testing import Harness
+
+from charm import NbiCharm
+
+
+class TestCharm(unittest.TestCase):
+ """NBI Charm unit tests."""
+
+ def setUp(self) -> NoReturn:
+ """Test setup"""
+ self.harness = Harness(NbiCharm)
+ self.harness.set_leader(is_leader=True)
+ self.harness.begin()
+
+ def test_on_start_without_relations(self) -> NoReturn:
+ """Test installation without any relation."""
+ self.harness.charm.on.start.emit()
+
+ # Verifying status
+ self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+ # Verifying status message
+ self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+ self.assertTrue(
+ self.harness.charm.unit.status.message.startswith("Waiting for ")
+ )
+ self.assertIn("kafka", self.harness.charm.unit.status.message)
+ self.assertIn("mongodb", self.harness.charm.unit.status.message)
+ self.assertIn("prometheus", self.harness.charm.unit.status.message)
+ self.assertNotIn("keystone", self.harness.charm.unit.status.message)
+ self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+ def test_on_start_without_relations_with_keystone(self) -> NoReturn:
+ """Test installation without any relation and keystone enabled."""
+ self.harness.update_config({"auth_backend": "keystone"})
+
+ self.harness.charm.on.start.emit()
+
+ # Verifying status
+ self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+ # Verifying status message
+ self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+ self.assertTrue(
+ self.harness.charm.unit.status.message.startswith("Waiting for ")
+ )
+ self.assertIn("kafka", self.harness.charm.unit.status.message)
+ self.assertIn("mongodb", self.harness.charm.unit.status.message)
+ self.assertIn("prometheus", self.harness.charm.unit.status.message)
+ self.assertIn("keystone", self.harness.charm.unit.status.message)
+ self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+ def test_on_start_with_relations(self) -> NoReturn:
+ """Test deployment without keystone."""
+ expected_result = {
+ "version": 3,
+ "containers": [
+ {
+ "name": "nbi",
+ "imageDetails": self.harness.charm.image.fetch(),
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "name": "nbi",
+ "containerPort": 9999,
+ "protocol": "TCP",
+ }
+ ],
+ "envConfig": {
+ "ALLOW_ANONYMOUS_LOGIN": "yes",
+ "OSMNBI_SERVER_ENABLE_TEST": False,
+ "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
+ "OSMNBI_MESSAGE_HOST": "kafka",
+ "OSMNBI_MESSAGE_DRIVER": "kafka",
+ "OSMNBI_MESSAGE_PORT": 9092,
+ "OSMNBI_DATABASE_DRIVER": "mongo",
+ "OSMNBI_DATABASE_URI": "mongo://mongo:27017",
+ "OSMNBI_DATABASE_COMMONKEY": "osm",
+ "OSMNBI_STORAGE_DRIVER": "mongo",
+ "OSMNBI_STORAGE_PATH": "/app/storage",
+ "OSMNBI_STORAGE_COLLECTION": "files",
+ "OSMNBI_STORAGE_URI": "mongo://mongo:27017",
+ "OSMNBI_PROMETHEUS_HOST": "prometheus",
+ "OSMNBI_PROMETHEUS_PORT": 9090,
+ "OSMNBI_LOG_LEVEL": "INFO",
+ "OSMNBI_AUTHENTICATION_BACKEND": "internal",
+ },
+ }
+ ],
+ "kubernetesResources": {
+ "ingressResources": [],
+ },
+ }
+
+ self.harness.charm.on.start.emit()
+
+ # Check if kafka datastore is initialized
+ self.assertIsNone(self.harness.charm.state.message_host)
+ self.assertIsNone(self.harness.charm.state.message_port)
+
+ # Check if mongodb datastore is initialized
+ self.assertIsNone(self.harness.charm.state.database_uri)
+
+ # Check if prometheus datastore is initialized
+ self.assertIsNone(self.harness.charm.state.prometheus_host)
+ self.assertIsNone(self.harness.charm.state.prometheus_port)
+
+ # Initializing the kafka relation
+ kafka_relation_id = self.harness.add_relation("kafka", "kafka")
+ self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
+ self.harness.update_relation_data(
+ kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+ )
+
+ # Initializing the mongo relation
+ mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
+ self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
+ self.harness.update_relation_data(
+ mongodb_relation_id,
+ "mongodb/0",
+ {"connection_string": "mongo://mongo:27017"},
+ )
+
+ # Initializing the prometheus relation
+ prometheus_relation_id = self.harness.add_relation("prometheus", "prometheus")
+ self.harness.add_relation_unit(prometheus_relation_id, "prometheus/0")
+ self.harness.update_relation_data(
+ prometheus_relation_id,
+ "prometheus/0",
+ {"hostname": "prometheus", "port": 9090},
+ )
+
+ # Checking if kafka data is stored
+ self.assertEqual(self.harness.charm.state.message_host, "kafka")
+ self.assertEqual(self.harness.charm.state.message_port, 9092)
+
+ # Checking if mongodb data is stored
+ self.assertEqual(self.harness.charm.state.database_uri, "mongo://mongo:27017")
+
+ # Checking if prometheus data is stored
+ self.assertEqual(self.harness.charm.state.prometheus_host, "prometheus")
+ self.assertEqual(self.harness.charm.state.prometheus_port, 9090)
+
+ # Verifying status
+ self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+ pod_spec, _ = self.harness.get_pod_spec()
+
+ self.assertDictEqual(expected_result, pod_spec)
+
+ def test_on_start_with_relations_with_keystone(self) -> NoReturn:
+ """Test deployment with keystone."""
+ expected_result = {
+ "version": 3,
+ "containers": [
+ {
+ "name": "nbi",
+ "imageDetails": self.harness.charm.image.fetch(),
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "name": "nbi",
+ "containerPort": 9999,
+ "protocol": "TCP",
+ }
+ ],
+ "envConfig": {
+ "ALLOW_ANONYMOUS_LOGIN": "yes",
+ "OSMNBI_SERVER_ENABLE_TEST": False,
+ "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
+ "OSMNBI_MESSAGE_HOST": "kafka",
+ "OSMNBI_MESSAGE_DRIVER": "kafka",
+ "OSMNBI_MESSAGE_PORT": 9092,
+ "OSMNBI_DATABASE_DRIVER": "mongo",
+ "OSMNBI_DATABASE_URI": "mongo://mongo:27017",
+ "OSMNBI_DATABASE_COMMONKEY": "osm",
+ "OSMNBI_STORAGE_DRIVER": "mongo",
+ "OSMNBI_STORAGE_PATH": "/app/storage",
+ "OSMNBI_STORAGE_COLLECTION": "files",
+ "OSMNBI_STORAGE_URI": "mongo://mongo:27017",
+ "OSMNBI_PROMETHEUS_HOST": "prometheus",
+ "OSMNBI_PROMETHEUS_PORT": 9090,
+ "OSMNBI_LOG_LEVEL": "INFO",
+ "OSMNBI_AUTHENTICATION_BACKEND": "keystone",
+ "OSMNBI_AUTHENTICATION_AUTH_URL": "keystone",
+ "OSMNBI_AUTHENTICATION_AUTH_PORT": 5000,
+ "OSMNBI_AUTHENTICATION_USER_DOMAIN_NAME": "default",
+ "OSMNBI_AUTHENTICATION_PROJECT_DOMAIN_NAME": "default",
+ "OSMNBI_AUTHENTICATION_SERVICE_USERNAME": "nbi",
+ "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD": "nbi",
+ "OSMNBI_AUTHENTICATION_SERVICE_PROJECT": "service",
+ },
+ }
+ ],
+ "kubernetesResources": {
+ "ingressResources": [],
+ },
+ }
+
+ self.harness.update_config({"auth_backend": "keystone"})
+
+ self.harness.charm.on.start.emit()
+
+ # Check if kafka datastore is initialized
+ self.assertIsNone(self.harness.charm.state.message_host)
+ self.assertIsNone(self.harness.charm.state.message_port)
+
+ # Check if mongodb datastore is initialized
+ self.assertIsNone(self.harness.charm.state.database_uri)
+
+ # Check if prometheus datastore is initialized
+ self.assertIsNone(self.harness.charm.state.prometheus_host)
+ self.assertIsNone(self.harness.charm.state.prometheus_port)
+
+ # Check if keystone datastore is initialized
+ self.assertIsNone(self.harness.charm.state.keystone_host)
+ self.assertIsNone(self.harness.charm.state.keystone_port)
+ self.assertIsNone(self.harness.charm.state.keystone_user_domain_name)
+ self.assertIsNone(self.harness.charm.state.keystone_project_domain_name)
+ self.assertIsNone(self.harness.charm.state.keystone_username)
+ self.assertIsNone(self.harness.charm.state.keystone_password)
+ self.assertIsNone(self.harness.charm.state.keystone_service)
+
+ # Initializing the kafka relation
+ kafka_relation_id = self.harness.add_relation("kafka", "kafka")
+ self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
+ self.harness.update_relation_data(
+ kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+ )
+
+ # Initializing the mongodb relation
+ mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
+ self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
+ self.harness.update_relation_data(
+ mongodb_relation_id,
+ "mongodb/0",
+ {"connection_string": "mongo://mongo:27017"},
+ )
+
+ # Initializing the prometheus relation
+        prometheus_relation_id = self.harness.add_relation("prometheus", "prometheus")
+        self.harness.add_relation_unit(prometheus_relation_id, "prometheus/0")
+        self.harness.update_relation_data(
+            prometheus_relation_id,
+ "prometheus/0",
+ {"hostname": "prometheus", "port": 9090},
+ )
+
+ # Initializing the keystone relation
+ keystone_relation_id = self.harness.add_relation("keystone", "keystone")
+ self.harness.add_relation_unit(keystone_relation_id, "keystone/0")
+ self.harness.update_relation_data(
+ keystone_relation_id,
+ "keystone/0",
+ {
+ "host": "keystone",
+ "port": 5000,
+ "user_domain_name": "default",
+ "project_domain_name": "default",
+ "username": "nbi",
+ "password": "nbi",
+ "service": "service",
+ },
+ )
+
+ # Checking if kafka data is stored
+ self.assertEqual(self.harness.charm.state.message_host, "kafka")
+ self.assertEqual(self.harness.charm.state.message_port, 9092)
+
+ # Checking if mongodb data is stored
+ self.assertEqual(self.harness.charm.state.database_uri, "mongo://mongo:27017")
+
+ # Checking if prometheus data is stored
+ self.assertEqual(self.harness.charm.state.prometheus_host, "prometheus")
+ self.assertEqual(self.harness.charm.state.prometheus_port, 9090)
+
+ # Checking if keystone data is stored
+ self.assertEqual(self.harness.charm.state.keystone_host, "keystone")
+ self.assertEqual(self.harness.charm.state.keystone_port, 5000)
+ self.assertEqual(self.harness.charm.state.keystone_user_domain_name, "default")
+ self.assertEqual(
+ self.harness.charm.state.keystone_project_domain_name, "default"
+ )
+ self.assertEqual(self.harness.charm.state.keystone_username, "nbi")
+ self.assertEqual(self.harness.charm.state.keystone_password, "nbi")
+ self.assertEqual(self.harness.charm.state.keystone_service, "service")
+
+ # Verifying status
+ self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+ pod_spec, _ = self.harness.get_pod_spec()
+
+ self.assertDictEqual(expected_result, pod_spec)
+
+ def test_ingress_resources_without_http(self) -> NoReturn:
+ """Test ingress resources without HTTP."""
+ expected_result = {
+ "version": 3,
+ "containers": [
+ {
+ "name": "nbi",
+ "imageDetails": self.harness.charm.image.fetch(),
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "name": "nbi",
+ "containerPort": 9999,
+ "protocol": "TCP",
+ }
+ ],
+ "envConfig": {
+ "ALLOW_ANONYMOUS_LOGIN": "yes",
+ "OSMNBI_SERVER_ENABLE_TEST": False,
+ "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
+ "OSMNBI_MESSAGE_HOST": "kafka",
+ "OSMNBI_MESSAGE_DRIVER": "kafka",
+ "OSMNBI_MESSAGE_PORT": 9092,
+ "OSMNBI_DATABASE_DRIVER": "mongo",
+ "OSMNBI_DATABASE_URI": "mongo://mongo:27017",
+ "OSMNBI_DATABASE_COMMONKEY": "osm",
+ "OSMNBI_STORAGE_DRIVER": "mongo",
+ "OSMNBI_STORAGE_PATH": "/app/storage",
+ "OSMNBI_STORAGE_COLLECTION": "files",
+ "OSMNBI_STORAGE_URI": "mongo://mongo:27017",
+ "OSMNBI_PROMETHEUS_HOST": "prometheus",
+ "OSMNBI_PROMETHEUS_PORT": 9090,
+ "OSMNBI_LOG_LEVEL": "INFO",
+ "OSMNBI_AUTHENTICATION_BACKEND": "internal",
+ },
+ }
+ ],
+ "kubernetesResources": {
+ "ingressResources": [],
+ },
+ }
+
+ self.harness.charm.on.start.emit()
+
+ # Initializing the kafka relation
+ kafka_relation_id = self.harness.add_relation("kafka", "kafka")
+ self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
+ self.harness.update_relation_data(
+ kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+ )
+
+ # Initializing the mongodb relation
+ mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
+ self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
+ self.harness.update_relation_data(
+ mongodb_relation_id,
+ "mongodb/0",
+ {"connection_string": "mongo://mongo:27017"},
+ )
+
+ # Initializing the prometheus relation
+        prometheus_relation_id = self.harness.add_relation("prometheus", "prometheus")
+        self.harness.add_relation_unit(prometheus_relation_id, "prometheus/0")
+        self.harness.update_relation_data(
+            prometheus_relation_id,
+ "prometheus/0",
+ {"hostname": "prometheus", "port": 9090},
+ )
+
+ self.harness.update_config({"site_url": "nbi"})
+
+ pod_spec, _ = self.harness.get_pod_spec()
+
+ self.assertDictEqual(expected_result, pod_spec)
+
+ def test_ingress_resources_with_http(self) -> NoReturn:
+ """Test ingress resources with HTTP."""
+ expected_result = {
+ "version": 3,
+ "containers": [
+ {
+ "name": "nbi",
+ "imageDetails": self.harness.charm.image.fetch(),
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "name": "nbi",
+ "containerPort": 9999,
+ "protocol": "TCP",
+ }
+ ],
+ "envConfig": {
+ "ALLOW_ANONYMOUS_LOGIN": "yes",
+ "OSMNBI_SERVER_ENABLE_TEST": False,
+ "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
+ "OSMNBI_MESSAGE_HOST": "kafka",
+ "OSMNBI_MESSAGE_DRIVER": "kafka",
+ "OSMNBI_MESSAGE_PORT": 9092,
+ "OSMNBI_DATABASE_DRIVER": "mongo",
+ "OSMNBI_DATABASE_URI": "mongo://mongo:27017",
+ "OSMNBI_DATABASE_COMMONKEY": "osm",
+ "OSMNBI_STORAGE_DRIVER": "mongo",
+ "OSMNBI_STORAGE_PATH": "/app/storage",
+ "OSMNBI_STORAGE_COLLECTION": "files",
+ "OSMNBI_STORAGE_URI": "mongo://mongo:27017",
+ "OSMNBI_PROMETHEUS_HOST": "prometheus",
+ "OSMNBI_PROMETHEUS_PORT": 9090,
+ "OSMNBI_LOG_LEVEL": "INFO",
+ "OSMNBI_AUTHENTICATION_BACKEND": "internal",
+ },
+ }
+ ],
+ "kubernetesResources": {
+ "ingressResources": [
+ {
+ "name": "nbi-ingress",
+ "annotations": {
+ "nginx.ingress.kubernetes.io/proxy-body-size": "0",
+ "nginx.ingress.kubernetes.io/ssl-redirect": "false",
+ },
+ "spec": {
+ "rules": [
+ {
+ "host": "nbi",
+ "http": {
+ "paths": [
+ {
+ "path": "/",
+ "backend": {
+ "serviceName": "nbi",
+ "servicePort": 9999,
+ },
+ }
+ ]
+ },
+ }
+ ]
+ },
+ }
+ ],
+ },
+ }
+
+ self.harness.charm.on.start.emit()
+
+ # Initializing the kafka relation
+ kafka_relation_id = self.harness.add_relation("kafka", "kafka")
+ self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
+ self.harness.update_relation_data(
+ kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+ )
+
+ # Initializing the mongodb relation
+ mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
+ self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
+ self.harness.update_relation_data(
+ mongodb_relation_id,
+ "mongodb/0",
+ {"connection_string": "mongo://mongo:27017"},
+ )
+
+ # Initializing the prometheus relation
+        prometheus_relation_id = self.harness.add_relation("prometheus", "prometheus")
+        self.harness.add_relation_unit(prometheus_relation_id, "prometheus/0")
+        self.harness.update_relation_data(
+            prometheus_relation_id,
+ "prometheus/0",
+ {"hostname": "prometheus", "port": 9090},
+ )
+
+ self.harness.update_config({"site_url": "http://nbi"})
+
+ pod_spec, _ = self.harness.get_pod_spec()
+
+ self.assertDictEqual(expected_result, pod_spec)
+
+ def test_ingress_resources_with_https(self) -> NoReturn:
+ """Test ingress resources with HTTPS."""
+ expected_result = {
+ "version": 3,
+ "containers": [
+ {
+ "name": "nbi",
+ "imageDetails": self.harness.charm.image.fetch(),
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "name": "nbi",
+ "containerPort": 9999,
+ "protocol": "TCP",
+ }
+ ],
+ "envConfig": {
+ "ALLOW_ANONYMOUS_LOGIN": "yes",
+ "OSMNBI_SERVER_ENABLE_TEST": False,
+ "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
+ "OSMNBI_MESSAGE_HOST": "kafka",
+ "OSMNBI_MESSAGE_DRIVER": "kafka",
+ "OSMNBI_MESSAGE_PORT": 9092,
+ "OSMNBI_DATABASE_DRIVER": "mongo",
+ "OSMNBI_DATABASE_URI": "mongo://mongo:27017",
+ "OSMNBI_DATABASE_COMMONKEY": "osm",
+ "OSMNBI_STORAGE_DRIVER": "mongo",
+ "OSMNBI_STORAGE_PATH": "/app/storage",
+ "OSMNBI_STORAGE_COLLECTION": "files",
+ "OSMNBI_STORAGE_URI": "mongo://mongo:27017",
+ "OSMNBI_PROMETHEUS_HOST": "prometheus",
+ "OSMNBI_PROMETHEUS_PORT": 9090,
+ "OSMNBI_LOG_LEVEL": "INFO",
+ "OSMNBI_AUTHENTICATION_BACKEND": "internal",
+ },
+ }
+ ],
+ "kubernetesResources": {
+ "ingressResources": [
+ {
+ "name": "nbi-ingress",
+ "annotations": {
+ "nginx.ingress.kubernetes.io/proxy-body-size": "0",
+ },
+ "spec": {
+ "rules": [
+ {
+ "host": "nbi",
+ "http": {
+ "paths": [
+ {
+ "path": "/",
+ "backend": {
+ "serviceName": "nbi",
+ "servicePort": 9999,
+ },
+ }
+ ]
+ },
+ }
+ ],
+ "tls": [{"hosts": ["nbi"], "secretName": "nbi"}],
+ },
+ }
+ ],
+ },
+ }
+
+ self.harness.charm.on.start.emit()
+
+ # Initializing the kafka relation
+ kafka_relation_id = self.harness.add_relation("kafka", "kafka")
+ self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
+ self.harness.update_relation_data(
+ kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+ )
+
+ # Initializing the mongodb relation
+ mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
+ self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
+ self.harness.update_relation_data(
+ mongodb_relation_id,
+ "mongodb/0",
+ {"connection_string": "mongo://mongo:27017"},
+ )
+
+ # Initializing the prometheus relation
+        prometheus_relation_id = self.harness.add_relation("prometheus", "prometheus")
+        self.harness.add_relation_unit(prometheus_relation_id, "prometheus/0")
+        self.harness.update_relation_data(
+            prometheus_relation_id,
+ "prometheus/0",
+ {"hostname": "prometheus", "port": 9090},
+ )
+
+ self.harness.update_config(
+ {"site_url": "https://nbi", "tls_secret_name": "nbi"}
+ )
+
+ pod_spec, _ = self.harness.get_pod_spec()
+
+ self.assertDictEqual(expected_result, pod_spec)
+
+ def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn:
+ """Test ingress resources with HTTPS and ingress whitelist."""
+ expected_result = {
+ "version": 3,
+ "containers": [
+ {
+ "name": "nbi",
+ "imageDetails": self.harness.charm.image.fetch(),
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "name": "nbi",
+ "containerPort": 9999,
+ "protocol": "TCP",
+ }
+ ],
+ "envConfig": {
+ "ALLOW_ANONYMOUS_LOGIN": "yes",
+ "OSMNBI_SERVER_ENABLE_TEST": False,
+ "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
+ "OSMNBI_MESSAGE_HOST": "kafka",
+ "OSMNBI_MESSAGE_DRIVER": "kafka",
+ "OSMNBI_MESSAGE_PORT": 9092,
+ "OSMNBI_DATABASE_DRIVER": "mongo",
+ "OSMNBI_DATABASE_URI": "mongo://mongo:27017",
+ "OSMNBI_DATABASE_COMMONKEY": "osm",
+ "OSMNBI_STORAGE_DRIVER": "mongo",
+ "OSMNBI_STORAGE_PATH": "/app/storage",
+ "OSMNBI_STORAGE_COLLECTION": "files",
+ "OSMNBI_STORAGE_URI": "mongo://mongo:27017",
+ "OSMNBI_PROMETHEUS_HOST": "prometheus",
+ "OSMNBI_PROMETHEUS_PORT": 9090,
+ "OSMNBI_LOG_LEVEL": "INFO",
+ "OSMNBI_AUTHENTICATION_BACKEND": "internal",
+ },
+ }
+ ],
+ "kubernetesResources": {
+ "ingressResources": [
+ {
+ "name": "nbi-ingress",
+ "annotations": {
+ "nginx.ingress.kubernetes.io/proxy-body-size": "0",
+ "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0",
+ },
+ "spec": {
+ "rules": [
+ {
+ "host": "nbi",
+ "http": {
+ "paths": [
+ {
+ "path": "/",
+ "backend": {
+ "serviceName": "nbi",
+ "servicePort": 9999,
+ },
+ }
+ ]
+ },
+ }
+ ],
+ "tls": [{"hosts": ["nbi"], "secretName": "nbi"}],
+ },
+ }
+ ],
+ },
+ }
+
+ self.harness.charm.on.start.emit()
+
+ # Initializing the kafka relation
+ kafka_relation_id = self.harness.add_relation("kafka", "kafka")
+ self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
+ self.harness.update_relation_data(
+ kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+ )
+
+ # Initializing the mongodb relation
+ mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
+ self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
+ self.harness.update_relation_data(
+ mongodb_relation_id,
+ "mongodb/0",
+ {"connection_string": "mongo://mongo:27017"},
+ )
+
+ # Initializing the prometheus relation
+        prometheus_relation_id = self.harness.add_relation("prometheus", "prometheus")
+        self.harness.add_relation_unit(prometheus_relation_id, "prometheus/0")
+        self.harness.update_relation_data(
+            prometheus_relation_id,
+ "prometheus/0",
+ {"hostname": "prometheus", "port": 9090},
+ )
+
+ self.harness.update_config(
+ {
+ "site_url": "https://nbi",
+ "tls_secret_name": "nbi",
+ "ingress_whitelist_source_range": "0.0.0.0/0",
+ }
+ )
+
+ pod_spec, _ = self.harness.get_pod_spec()
+
+ self.assertDictEqual(expected_result, pod_spec)
+
+ def test_on_kafka_relation_changed(self) -> NoReturn:
+ """Test to see if kafka relation is updated."""
+ self.harness.charm.on.start.emit()
+
+ self.assertIsNone(self.harness.charm.state.message_host)
+ self.assertIsNone(self.harness.charm.state.message_port)
+
+ relation_id = self.harness.add_relation("kafka", "kafka")
+ self.harness.add_relation_unit(relation_id, "kafka/0")
+ self.harness.update_relation_data(
+ relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+ )
+
+ self.assertEqual(self.harness.charm.state.message_host, "kafka")
+ self.assertEqual(self.harness.charm.state.message_port, 9092)
+
+ # Verifying status
+ self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+ # Verifying status message
+ self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+ self.assertTrue(
+ self.harness.charm.unit.status.message.startswith("Waiting for ")
+ )
+ self.assertNotIn("kafka", self.harness.charm.unit.status.message)
+ self.assertIn("mongodb", self.harness.charm.unit.status.message)
+ self.assertIn("prometheus", self.harness.charm.unit.status.message)
+ self.assertNotIn("keystone", self.harness.charm.unit.status.message)
+ self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+ def test_on_mongodb_relation_changed(self) -> NoReturn:
+ """Test to see if mongodb relation is updated."""
+ self.harness.charm.on.start.emit()
+
+ self.assertIsNone(self.harness.charm.state.database_uri)
+
+ relation_id = self.harness.add_relation("mongodb", "mongodb")
+ self.harness.add_relation_unit(relation_id, "mongodb/0")
+ self.harness.update_relation_data(
+ relation_id, "mongodb/0", {"connection_string": "mongo://mongo:27017"}
+ )
+
+ self.assertEqual(self.harness.charm.state.database_uri, "mongo://mongo:27017")
+
+ # Verifying status
+ self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+ # Verifying status message
+ self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+ self.assertTrue(
+ self.harness.charm.unit.status.message.startswith("Waiting for ")
+ )
+ self.assertIn("kafka", self.harness.charm.unit.status.message)
+ self.assertNotIn("mongodb", self.harness.charm.unit.status.message)
+ self.assertIn("prometheus", self.harness.charm.unit.status.message)
+ self.assertNotIn("keystone", self.harness.charm.unit.status.message)
+ self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+ def test_on_prometheus_relation_changed(self) -> NoReturn:
+ """Test to see if prometheus relation is updated."""
+ self.harness.charm.on.start.emit()
+
+ self.assertIsNone(self.harness.charm.state.prometheus_host)
+ self.assertIsNone(self.harness.charm.state.prometheus_port)
+
+ relation_id = self.harness.add_relation("prometheus", "prometheus")
+ self.harness.add_relation_unit(relation_id, "prometheus/0")
+ self.harness.update_relation_data(
+ relation_id, "prometheus/0", {"hostname": "prometheus", "port": 9090}
+ )
+
+ self.assertEqual(self.harness.charm.state.prometheus_host, "prometheus")
+ self.assertEqual(self.harness.charm.state.prometheus_port, 9090)
+
+ # Verifying status
+ self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+ # Verifying status message
+ self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+ self.assertTrue(
+ self.harness.charm.unit.status.message.startswith("Waiting for ")
+ )
+ self.assertIn("kafka", self.harness.charm.unit.status.message)
+ self.assertIn("mongodb", self.harness.charm.unit.status.message)
+ self.assertNotIn("prometheus", self.harness.charm.unit.status.message)
+ self.assertNotIn("keystone", self.harness.charm.unit.status.message)
+ self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+ def test_on_keystone_relation_changed(self) -> NoReturn:
+ """Test to see if keystone relation is updated."""
+ self.harness.update_config({"auth_backend": "keystone"})
+
+ self.harness.charm.on.start.emit()
+
+ self.assertIsNone(self.harness.charm.state.keystone_host)
+ self.assertIsNone(self.harness.charm.state.keystone_port)
+ self.assertIsNone(self.harness.charm.state.keystone_user_domain_name)
+ self.assertIsNone(self.harness.charm.state.keystone_project_domain_name)
+ self.assertIsNone(self.harness.charm.state.keystone_username)
+ self.assertIsNone(self.harness.charm.state.keystone_password)
+ self.assertIsNone(self.harness.charm.state.keystone_service)
+
+ relation_id = self.harness.add_relation("keystone", "keystone")
+ self.harness.add_relation_unit(relation_id, "keystone/0")
+ self.harness.update_relation_data(
+ relation_id,
+ "keystone/0",
+ {
+ "host": "keystone",
+ "port": 5000,
+ "user_domain_name": "default",
+ "project_domain_name": "default",
+ "username": "nbi",
+ "password": "nbi",
+ "service": "service",
+ },
+ )
+
+ self.assertEqual(self.harness.charm.state.keystone_host, "keystone")
+ self.assertEqual(self.harness.charm.state.keystone_port, 5000)
+ self.assertEqual(self.harness.charm.state.keystone_user_domain_name, "default")
+ self.assertEqual(
+ self.harness.charm.state.keystone_project_domain_name, "default"
+ )
+ self.assertEqual(self.harness.charm.state.keystone_username, "nbi")
+ self.assertEqual(self.harness.charm.state.keystone_password, "nbi")
+ self.assertEqual(self.harness.charm.state.keystone_service, "service")
+
+ # Verifying status
+ self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+ # Verifying status message
+ self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+ self.assertTrue(
+ self.harness.charm.unit.status.message.startswith("Waiting for ")
+ )
+ self.assertIn("kafka", self.harness.charm.unit.status.message)
+ self.assertIn("mongodb", self.harness.charm.unit.status.message)
+ self.assertIn("prometheus", self.harness.charm.unit.status.message)
+ self.assertNotIn("keystone", self.harness.charm.unit.status.message)
+ self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+ def test_publish_nbi_info(self) -> NoReturn:
+ """Test to see if nbi relation is updated."""
+ expected_result = {
+ "host": "nbi",
+ "port": "9999",
+ }
+
+ self.harness.charm.on.start.emit()
+
+ relation_id = self.harness.add_relation("nbi", "ng-ui")
+ self.harness.add_relation_unit(relation_id, "ng-ui/0")
+ relation_data = self.harness.get_relation_data(relation_id, "nbi")
+
+ self.assertDictEqual(expected_result, relation_data)
+
+
+if __name__ == "__main__":
+ unittest.main()
--- /dev/null
+#!/usr/bin/env python3
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+from typing import NoReturn
+import unittest
+
+from pydantic import ValidationError
+
+import pod_spec
+
+
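+# These tests exercise the module-level helpers of pod_spec directly
+# (including the internal _make_* functions), without going through the charm.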
+class TestPodSpec(unittest.TestCase):
+ """Pod spec unit tests."""
+
+ def test_make_pod_ports(self) -> NoReturn:
+ """Testing make pod ports."""
+ port = 9999
+
+ expected_result = [
+ {
+ "name": "nbi",
+ "containerPort": port,
+ "protocol": "TCP",
+ }
+ ]
+
+ pod_ports = pod_spec._make_pod_ports(port)
+
+ self.assertListEqual(expected_result, pod_ports)
+
+ def test_make_pod_envconfig_without_keystone(self) -> NoReturn:
+ """Testing make pod envconfig without Keystone."""
+ config = {
+ "enable_test": False,
+ "database_commonkey": "commonkey",
+ "log_level": "DEBUG",
+ "auth_backend": "internal",
+ }
+ relation_state = {
+ "message_host": "kafka",
+ "message_port": 9090,
+ "database_uri": "mongo://mongo",
+ "prometheus_host": "prometheus",
+ "prometheus_port": 9082,
+ }
+
+ expected_result = {
+ "ALLOW_ANONYMOUS_LOGIN": "yes",
+ "OSMNBI_SERVER_ENABLE_TEST": config["enable_test"],
+ "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
+ "OSMNBI_MESSAGE_HOST": relation_state["message_host"],
+ "OSMNBI_MESSAGE_DRIVER": "kafka",
+ "OSMNBI_MESSAGE_PORT": relation_state["message_port"],
+ "OSMNBI_DATABASE_DRIVER": "mongo",
+ "OSMNBI_DATABASE_URI": relation_state["database_uri"],
+ "OSMNBI_DATABASE_COMMONKEY": config["database_commonkey"],
+ "OSMNBI_STORAGE_DRIVER": "mongo",
+ "OSMNBI_STORAGE_PATH": "/app/storage",
+ "OSMNBI_STORAGE_COLLECTION": "files",
+ "OSMNBI_STORAGE_URI": relation_state["database_uri"],
+ "OSMNBI_PROMETHEUS_HOST": relation_state["prometheus_host"],
+ "OSMNBI_PROMETHEUS_PORT": relation_state["prometheus_port"],
+ "OSMNBI_LOG_LEVEL": config["log_level"],
+ "OSMNBI_AUTHENTICATION_BACKEND": config["auth_backend"],
+ }
+
+ pod_envconfig = pod_spec._make_pod_envconfig(config, relation_state)
+
+ self.assertDictEqual(expected_result, pod_envconfig)
+
+ def test_make_pod_envconfig_with_keystone(self) -> NoReturn:
+ """Testing make pod envconfig with Keystone."""
+ config = {
+ "enable_test": False,
+ "database_commonkey": "commonkey",
+ "log_level": "DEBUG",
+ "auth_backend": "keystone",
+ }
+ relation_state = {
+ "message_host": "kafka",
+ "message_port": 9090,
+ "database_uri": "mongo://mongo",
+ "prometheus_host": "prometheus",
+ "prometheus_port": 9082,
+ "keystone_host": "keystone",
+ "keystone_port": 5000,
+ "keystone_user_domain_name": "user_domain",
+ "keystone_project_domain_name": "project_domain",
+ "keystone_username": "username",
+ "keystone_password": "password",
+ "keystone_service": "service",
+ }
+
+ expected_result = {
+ "ALLOW_ANONYMOUS_LOGIN": "yes",
+ "OSMNBI_SERVER_ENABLE_TEST": config["enable_test"],
+ "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
+ "OSMNBI_MESSAGE_HOST": relation_state["message_host"],
+ "OSMNBI_MESSAGE_DRIVER": "kafka",
+ "OSMNBI_MESSAGE_PORT": relation_state["message_port"],
+ "OSMNBI_DATABASE_DRIVER": "mongo",
+ "OSMNBI_DATABASE_URI": relation_state["database_uri"],
+ "OSMNBI_DATABASE_COMMONKEY": config["database_commonkey"],
+ "OSMNBI_STORAGE_DRIVER": "mongo",
+ "OSMNBI_STORAGE_PATH": "/app/storage",
+ "OSMNBI_STORAGE_COLLECTION": "files",
+ "OSMNBI_STORAGE_URI": relation_state["database_uri"],
+ "OSMNBI_PROMETHEUS_HOST": relation_state["prometheus_host"],
+ "OSMNBI_PROMETHEUS_PORT": relation_state["prometheus_port"],
+ "OSMNBI_LOG_LEVEL": config["log_level"],
+ "OSMNBI_AUTHENTICATION_BACKEND": config["auth_backend"],
+ "OSMNBI_AUTHENTICATION_AUTH_URL": relation_state["keystone_host"],
+ "OSMNBI_AUTHENTICATION_AUTH_PORT": relation_state["keystone_port"],
+ "OSMNBI_AUTHENTICATION_USER_DOMAIN_NAME": relation_state[
+ "keystone_user_domain_name"
+ ],
+ "OSMNBI_AUTHENTICATION_PROJECT_DOMAIN_NAME": relation_state[
+ "keystone_project_domain_name"
+ ],
+ "OSMNBI_AUTHENTICATION_SERVICE_USERNAME": relation_state[
+ "keystone_username"
+ ],
+ "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD": relation_state[
+ "keystone_password"
+ ],
+ "OSMNBI_AUTHENTICATION_SERVICE_PROJECT": relation_state["keystone_service"],
+ }
+
+ pod_envconfig = pod_spec._make_pod_envconfig(config, relation_state)
+
+ self.assertDictEqual(expected_result, pod_envconfig)
+
+ def test_make_pod_envconfig_wrong_auth_backend(self) -> NoReturn:
+ """Testing make pod envconfig with wrong auth_backend."""
+ config = {
+ "enable_test": False,
+ "database_commonkey": "commonkey",
+ "log_level": "DEBUG",
+ "auth_backend": "kerberos",
+ }
+ relation_state = {
+ "message_host": "kafka",
+ "message_port": 9090,
+ "database_uri": "mongo://mongo",
+ "prometheus_host": "prometheus",
+ "prometheus_port": 9082,
+ "keystone_host": "keystone",
+ "keystone_port": 5000,
+ "keystone_user_domain_name": "user_domain",
+ "keystone_project_domain_name": "project_domain",
+ "keystone_username": "username",
+ "keystone_password": "password",
+ "keystone_service": "service",
+ }
+
+ with self.assertRaises(ValueError) as exc:
+ pod_spec._make_pod_envconfig(config, relation_state)
+
+ self.assertIn(
+ "auth_backend needs to be either internal or keystone", str(exc.exception)
+ )
+
+ def test_make_pod_ingress_resources_without_site_url(self) -> NoReturn:
+ """Testing make pod ingress resources without site_url."""
+ config = {"site_url": ""}
+ app_name = "nbi"
+ port = 9999
+
+ pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+ config, app_name, port
+ )
+
+ self.assertIsNone(pod_ingress_resources)
+
+ def test_make_pod_ingress_resources(self) -> NoReturn:
+ """Testing make pod ingress resources."""
+ config = {
+ "site_url": "http://nbi",
+ "max_file_size": 0,
+ "ingress_whitelist_source_range": "",
+ }
+ app_name = "nbi"
+ port = 9999
+
+ expected_result = [
+ {
+ "name": f"{app_name}-ingress",
+ "annotations": {
+ "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
+ "nginx.ingress.kubernetes.io/ssl-redirect": "false",
+ },
+ "spec": {
+ "rules": [
+ {
+ "host": app_name,
+ "http": {
+ "paths": [
+ {
+ "path": "/",
+ "backend": {
+ "serviceName": app_name,
+ "servicePort": port,
+ },
+ }
+ ]
+ },
+ }
+ ]
+ },
+ }
+ ]
+
+ pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+ config, app_name, port
+ )
+
+ self.assertListEqual(expected_result, pod_ingress_resources)
+
+ def test_make_pod_ingress_resources_with_whitelist_source_range(self) -> NoReturn:
+ """Testing make pod ingress resources with whitelist_source_range."""
+ config = {
+ "site_url": "http://nbi",
+ "max_file_size": 0,
+ "ingress_whitelist_source_range": "0.0.0.0/0",
+ }
+ app_name = "nbi"
+ port = 9999
+
+ expected_result = [
+ {
+ "name": f"{app_name}-ingress",
+ "annotations": {
+ "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
+ "nginx.ingress.kubernetes.io/ssl-redirect": "false",
+ "nginx.ingress.kubernetes.io/whitelist-source-range": config[
+ "ingress_whitelist_source_range"
+ ],
+ },
+ "spec": {
+ "rules": [
+ {
+ "host": app_name,
+ "http": {
+ "paths": [
+ {
+ "path": "/",
+ "backend": {
+ "serviceName": app_name,
+ "servicePort": port,
+ },
+ }
+ ]
+ },
+ }
+ ]
+ },
+ }
+ ]
+
+ pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+ config, app_name, port
+ )
+
+ self.assertListEqual(expected_result, pod_ingress_resources)
+
+ def test_make_pod_ingress_resources_with_https(self) -> NoReturn:
+ """Testing make pod ingress resources with HTTPs."""
+ config = {
+ "site_url": "https://nbi",
+ "max_file_size": 0,
+ "ingress_whitelist_source_range": "",
+ "tls_secret_name": "",
+ }
+ app_name = "nbi"
+ port = 9999
+
+ expected_result = [
+ {
+ "name": f"{app_name}-ingress",
+ "annotations": {
+ "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
+ },
+ "spec": {
+ "rules": [
+ {
+ "host": app_name,
+ "http": {
+ "paths": [
+ {
+ "path": "/",
+ "backend": {
+ "serviceName": app_name,
+ "servicePort": port,
+ },
+ }
+ ]
+ },
+ }
+ ],
+ "tls": [{"hosts": [app_name]}],
+ },
+ }
+ ]
+
+ pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+ config, app_name, port
+ )
+
+ self.assertListEqual(expected_result, pod_ingress_resources)
+
+ def test_make_pod_ingress_resources_with_https_tls_secret_name(self) -> NoReturn:
+ """Testing make pod ingress resources with HTTPs and TLS secret name."""
+ config = {
+ "site_url": "https://nbi",
+ "max_file_size": 0,
+ "ingress_whitelist_source_range": "",
+ "tls_secret_name": "secret_name",
+ }
+ app_name = "nbi"
+ port = 9999
+
+ expected_result = [
+ {
+ "name": f"{app_name}-ingress",
+ "annotations": {
+ "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
+ },
+ "spec": {
+ "rules": [
+ {
+ "host": app_name,
+ "http": {
+ "paths": [
+ {
+ "path": "/",
+ "backend": {
+ "serviceName": app_name,
+ "servicePort": port,
+ },
+ }
+ ]
+ },
+ }
+ ],
+ "tls": [
+ {"hosts": [app_name], "secretName": config["tls_secret_name"]}
+ ],
+ },
+ }
+ ]
+
+ pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+ config, app_name, port
+ )
+
+ self.assertListEqual(expected_result, pod_ingress_resources)
+
+ def test_make_startup_probe(self) -> NoReturn:
+ """Testing make startup probe."""
+ expected_result = {
+ "exec": {"command": ["/usr/bin/pgrep python3"]},
+ "initialDelaySeconds": 60,
+ "timeoutSeconds": 5,
+ }
+
+ startup_probe = pod_spec._make_startup_probe()
+
+ self.assertDictEqual(expected_result, startup_probe)
+
+ def test_make_readiness_probe(self) -> NoReturn:
+ """Testing make readiness probe."""
+ port = 9999
+
+ expected_result = {
+ "httpGet": {
+ "path": "/osm/",
+ "port": port,
+ },
+ "initialDelaySeconds": 45,
+ "timeoutSeconds": 5,
+ }
+
+ readiness_probe = pod_spec._make_readiness_probe(port)
+
+ self.assertDictEqual(expected_result, readiness_probe)
+
+ def test_make_liveness_probe(self) -> NoReturn:
+ """Testing make liveness probe."""
+ port = 9999
+
+ expected_result = {
+ "httpGet": {
+ "path": "/osm/",
+ "port": port,
+ },
+ "initialDelaySeconds": 45,
+ "timeoutSeconds": 5,
+ }
+
+ liveness_probe = pod_spec._make_liveness_probe(port)
+
+ self.assertDictEqual(expected_result, liveness_probe)
+
+ def test_make_pod_spec_without_image_info(self) -> NoReturn:
+ """Testing make pod spec without image_info."""
+ image_info = None
+ config = {
+ "enable_test": False,
+ "database_commonkey": "commonkey",
+ "log_level": "DEBUG",
+ "auth_backend": "internal",
+ "site_url": "",
+ }
+ relation_state = {
+ "message_host": "kafka",
+ "message_port": 9090,
+ "database_uri": "mongo://mongo",
+ "prometheus_host": "prometheus",
+ "prometheus_port": 9082,
+ }
+ app_name = "nbi"
+ port = 9999
+
+ spec = pod_spec.make_pod_spec(
+ image_info, config, relation_state, app_name, port
+ )
+
+ self.assertIsNone(spec)
+
+ def test_make_pod_spec_without_config(self) -> NoReturn:
+ """Testing make pod spec without config."""
+ image_info = {"upstream-source": "opensourcemano/nbi:8"}
+ config = {}
+ relation_state = {
+ "message_host": "kafka",
+ "message_port": 9090,
+ "database_uri": "mongo://mongo",
+ "prometheus_host": "prometheus",
+ "prometheus_port": 9082,
+ }
+ app_name = "nbi"
+ port = 9999
+
+ with self.assertRaises(ValidationError):
+ pod_spec.make_pod_spec(
+ image_info, config, relation_state, app_name, port
+ )
+
+ def test_make_pod_spec_without_relation_state(self) -> NoReturn:
+ """Testing make pod spec without relation_state."""
+ image_info = {"upstream-source": "opensourcemano/nbi:8"}
+ config = {
+ "enable_test": False,
+ "database_commonkey": "commonkey",
+ "log_level": "DEBUG",
+ "auth_backend": "internal",
+ "site_url": "",
+ }
+ relation_state = {}
+ app_name = "nbi"
+ port = 9999
+
+ with self.assertRaises(ValidationError):
+ pod_spec.make_pod_spec(
+ image_info, config, relation_state, app_name, port
+ )
+
+ def test_make_pod_spec(self) -> NoReturn:
+ """Testing make pod spec."""
+ image_info = {"upstream-source": "opensourcemano/nbi:8"}
+ config = {
+ "enable_test": False,
+ "database_commonkey": "commonkey",
+ "log_level": "DEBUG",
+ "auth_backend": "internal",
+ "site_url": "",
+ }
+ relation_state = {
+ "message_host": "kafka",
+ "message_port": 9090,
+ "database_uri": "mongo://mongo",
+ "prometheus_host": "prometheus",
+ "prometheus_port": 9082,
+ }
+ app_name = "nbi"
+ port = 9999
+
+ expected_result = {
+ "version": 3,
+ "containers": [
+ {
+ "name": app_name,
+ "imageDetails": image_info,
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "name": "nbi",
+ "containerPort": port,
+ "protocol": "TCP",
+ }
+ ],
+ "envConfig": {
+ "ALLOW_ANONYMOUS_LOGIN": "yes",
+ "OSMNBI_SERVER_ENABLE_TEST": config["enable_test"],
+ "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
+ "OSMNBI_MESSAGE_HOST": relation_state["message_host"],
+ "OSMNBI_MESSAGE_DRIVER": "kafka",
+ "OSMNBI_MESSAGE_PORT": relation_state["message_port"],
+ "OSMNBI_DATABASE_DRIVER": "mongo",
+ "OSMNBI_DATABASE_URI": relation_state["database_uri"],
+ "OSMNBI_DATABASE_COMMONKEY": config["database_commonkey"],
+ "OSMNBI_STORAGE_DRIVER": "mongo",
+ "OSMNBI_STORAGE_PATH": "/app/storage",
+ "OSMNBI_STORAGE_COLLECTION": "files",
+ "OSMNBI_STORAGE_URI": relation_state["database_uri"],
+ "OSMNBI_PROMETHEUS_HOST": relation_state["prometheus_host"],
+ "OSMNBI_PROMETHEUS_PORT": relation_state["prometheus_port"],
+ "OSMNBI_LOG_LEVEL": config["log_level"],
+ "OSMNBI_AUTHENTICATION_BACKEND": config["auth_backend"],
+ },
+ }
+ ],
+ "kubernetesResources": {
+ "ingressResources": [],
+ },
+ }
+
+ spec = pod_spec.make_pod_spec(
+ image_info, config, relation_state, app_name, port
+ )
+
+ self.assertDictEqual(expected_result, spec)
+
+ def test_make_pod_spec_with_keystone(self) -> NoReturn:
+ """Testing make pod spec with keystone."""
+ image_info = {"upstream-source": "opensourcemano/nbi:8"}
+ config = {
+ "enable_test": False,
+ "database_commonkey": "commonkey",
+ "log_level": "DEBUG",
+ "auth_backend": "keystone",
+ "site_url": "",
+ }
+ relation_state = {
+ "message_host": "kafka",
+ "message_port": 9090,
+ "database_uri": "mongo://mongo",
+ "prometheus_host": "prometheus",
+ "prometheus_port": 9082,
+ "keystone_host": "keystone",
+ "keystone_port": 5000,
+ "keystone_user_domain_name": "user_domain",
+ "keystone_project_domain_name": "project_domain",
+ "keystone_username": "username",
+ "keystone_password": "password",
+ "keystone_service": "service",
+ }
+ app_name = "nbi"
+ port = 9999
+
+ expected_result = {
+ "version": 3,
+ "containers": [
+ {
+ "name": app_name,
+ "imageDetails": image_info,
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "name": "nbi",
+ "containerPort": port,
+ "protocol": "TCP",
+ }
+ ],
+ "envConfig": {
+ "ALLOW_ANONYMOUS_LOGIN": "yes",
+ "OSMNBI_SERVER_ENABLE_TEST": config["enable_test"],
+ "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
+ "OSMNBI_MESSAGE_HOST": relation_state["message_host"],
+ "OSMNBI_MESSAGE_DRIVER": "kafka",
+ "OSMNBI_MESSAGE_PORT": relation_state["message_port"],
+ "OSMNBI_DATABASE_DRIVER": "mongo",
+ "OSMNBI_DATABASE_URI": relation_state["database_uri"],
+ "OSMNBI_DATABASE_COMMONKEY": config["database_commonkey"],
+ "OSMNBI_STORAGE_DRIVER": "mongo",
+ "OSMNBI_STORAGE_PATH": "/app/storage",
+ "OSMNBI_STORAGE_COLLECTION": "files",
+ "OSMNBI_STORAGE_URI": relation_state["database_uri"],
+ "OSMNBI_PROMETHEUS_HOST": relation_state["prometheus_host"],
+ "OSMNBI_PROMETHEUS_PORT": relation_state["prometheus_port"],
+ "OSMNBI_LOG_LEVEL": config["log_level"],
+ "OSMNBI_AUTHENTICATION_BACKEND": config["auth_backend"],
+ "OSMNBI_AUTHENTICATION_AUTH_URL": relation_state[
+ "keystone_host"
+ ],
+ "OSMNBI_AUTHENTICATION_AUTH_PORT": relation_state[
+ "keystone_port"
+ ],
+ "OSMNBI_AUTHENTICATION_USER_DOMAIN_NAME": relation_state[
+ "keystone_user_domain_name"
+ ],
+ "OSMNBI_AUTHENTICATION_PROJECT_DOMAIN_NAME": relation_state[
+ "keystone_project_domain_name"
+ ],
+ "OSMNBI_AUTHENTICATION_SERVICE_USERNAME": relation_state[
+ "keystone_username"
+ ],
+ "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD": relation_state[
+ "keystone_password"
+ ],
+ "OSMNBI_AUTHENTICATION_SERVICE_PROJECT": relation_state[
+ "keystone_service"
+ ],
+ },
+ }
+ ],
+ "kubernetesResources": {
+ "ingressResources": [],
+ },
+ }
+
+ spec = pod_spec.make_pod_spec(
+ image_info, config, relation_state, app_name, port
+ )
+
+ self.assertDictEqual(expected_result, spec)
+
+
+if __name__ == "__main__":
+ unittest.main()
--- /dev/null
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+[tox]
+skipsdist = True
+envlist = unit, lint
+sitepackages = False
+skip_missing_interpreters = False
+
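+# Settings shared by every environment: a fixed hash seed keeps test runs
+# reproducible, and PYTHONPATH points at src/ so the tests can import the
+# charm modules (e.g. pod_spec) directly.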
+[testenv]
+basepython = python3
+setenv =
+ PYTHONHASHSEED=0
+ PYTHONPATH = {toxinidir}/src
+ CHARM_NAME = nbi
+
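+# Builds the charm with charmcraft and unpacks the resulting nbi.charm into
+# ./release so the built payload can be inspected.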
+[testenv:build]
+passenv = HTTP_PROXY HTTPS_PROXY NO_PROXY
+whitelist_externals =
+ charmcraft
+ rm
+ unzip
+commands =
+ rm -rf release nbi.charm
+ charmcraft build
+ unzip nbi.charm -d release
+
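+# Runs the unit tests under stestr and produces coverage reports (HTML in
+# cover/ plus cover/coverage.xml); PYTHON=coverage run below wraps the test
+# runner in coverage, assuming the stestr test command honours $PYTHON.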
+[testenv:unit]
+commands =
+ coverage erase
+ stestr run --slowest --test-path=./tests --top-dir=./
+ coverage combine
+ coverage html -d cover
+ coverage xml -o cover/coverage.xml
+ coverage report
+deps =
+ coverage
+ stestr
+ mock
+ ops
+ pydantic
+setenv =
+ {[testenv]setenv}
+ PYTHON=coverage run
+
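+# Static checks only: black (formatting), yamllint (YAML files) and flake8 (PEP8).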
+[testenv:lint]
+deps =
+ black
+ yamllint
+ flake8
+commands =
+ black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+ yamllint .
+ flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/,.tox/,mod/,lib/"
+
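+# stestr runs tests in parallel worker processes, so coverage data is written
+# per process (parallel/concurrency below) and merged by "coverage combine"
+# in the unit environment.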
+[coverage:run]
+branch = True
+concurrency = multiprocessing
+parallel = True
+source =
+ .
+omit =
+ .tox/*
+ tests/*