| #!/usr/bin/env python3 |
| # Copyright 2020 Canonical Ltd. |
| # |
| # Licensed under the Apache License, Version 2.0 (the "License"); |
| # you may not use this file except in compliance with the License. |
| # You may obtain a copy of the License at |
| # |
| # http://www.apache.org/licenses/LICENSE-2.0 |
| # |
| # Unless required by applicable law or agreed to in writing, software |
| # distributed under the License is distributed on an "AS IS" BASIS, |
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| # See the License for the specific language governing permissions and |
| # limitations under the License. |
| |
import logging

from urllib.parse import urlparse

from ops.charm import CharmBase

# from ops.framework import StoredState
from ops.main import main
from ops.model import (
    ActiveStatus,
    BlockedStatus,
    MaintenanceStatus,
    WaitingStatus,
    # ModelError,
)
from ops.framework import StoredState
| |
logger = logging.getLogger(__name__)

# Config options that must be non-empty before the pod spec can be applied;
# currently none, but _check_settings() enforces whatever is listed here.
REQUIRED_SETTINGS = []

DATABASE_NAME = "keystone"  # This is hardcoded in the keystone container script
# We expect the keystone container to use the default port
KEYSTONE_PORT = 5000
| |
| |
class KeystoneCharm(CharmBase):
    """Kubernetes charm that deploys and configures Keystone.

    The charm waits for database connection details from the ``db``
    relation, assembles a Kubernetes pod spec (container, env config and
    optional ingress) on the leader unit, and publishes Keystone endpoint
    details and credentials over the ``keystone`` relation.
    """

    # Database connection details learned from the db relation, persisted
    # across charm invocations.
    state = StoredState()

    def __init__(self, *args):
        super().__init__(*args)

        # Any of these lifecycle events requires (re)building the pod spec.
        self.framework.observe(self.on.config_changed, self.configure_pod)
        self.framework.observe(self.on.start, self.configure_pod)
        self.framework.observe(self.on.upgrade_charm, self.configure_pod)

        # Register relation events and the stored defaults they populate.
        self.state.set_default(
            db_host=None, db_port=None, db_user=None, db_password=None
        )
        self.framework.observe(
            self.on.db_relation_changed, self._on_db_relation_changed
        )
        self.framework.observe(
            self.on.keystone_relation_joined, self._publish_keystone_info
        )

    def _publish_keystone_info(self, event):
        """Publish Keystone endpoint details and credentials on the relation.

        Only the leader writes the relation data, so multiple units do not
        issue conflicting writes to the same bag.

        Args:
            event: the ``keystone_relation_joined`` event.
        """
        config = self.model.config
        if self.unit.is_leader():
            rel_data = {
                "host": f"http://{self.app.name}:{KEYSTONE_PORT}/v3",
                "port": str(KEYSTONE_PORT),
                "keystone_db_password": config["keystone_db_password"],
                "region_id": config["region_id"],
                "user_domain_name": config["user_domain_name"],
                "project_domain_name": config["project_domain_name"],
                "admin_username": config["admin_username"],
                "admin_password": config["admin_password"],
                "admin_project_name": config["admin_project"],
                "username": config["service_username"],
                "password": config["service_password"],
                "service": config["service_project"],
            }
            for k, v in rel_data.items():
                event.relation.data[self.unit][k] = v

    def _on_db_relation_changed(self, event):
        """Cache the database connection details and reconfigure the pod.

        Relation data values are always strings, so the port default is the
        string ``"3306"`` to stay consistent with values read from the bag.

        Args:
            event: the ``db_relation_changed`` event.
        """
        remote_data = event.relation.data[event.unit]
        self.state.db_host = remote_data.get("host")
        self.state.db_port = remote_data.get("port", "3306")
        # The keystone container script provisions its database as root.
        self.state.db_user = "root"
        self.state.db_password = remote_data.get("root_password")
        # Only reconfigure once the remote side has published its host.
        if self.state.db_host:
            self.configure_pod(event)

    def _check_settings(self):
        """Return a ';'-joined description of missing required settings.

        Returns:
            str: one ``missing config <name>`` entry per unset setting in
            ``REQUIRED_SETTINGS``; empty string when configuration is complete.
        """
        config = self.model.config
        return ";".join(
            f"missing config {setting}"
            for setting in REQUIRED_SETTINGS
            if not config.get(setting)
        )

    def _make_pod_image_details(self):
        """Build the ``imageDetails`` section of the pod spec.

        Registry credentials are included only when ``image_username`` is set.
        """
        config = self.model.config
        image_details = {
            "imagePath": config["image"],
        }
        if config["image_username"]:
            image_details.update(
                {
                    "username": config["image_username"],
                    "password": config["image_password"],
                }
            )
        return image_details

    def _make_pod_ports(self):
        """Build the container port list (Keystone's single HTTP port)."""
        return [
            {"name": "keystone", "containerPort": KEYSTONE_PORT, "protocol": "TCP"},
        ]

    def _make_pod_envconfig(self):
        """Build the container environment from charm config and db relation data."""
        config = self.model.config

        return {
            "DB_HOST": self.state.db_host,
            "DB_PORT": self.state.db_port,
            "ROOT_DB_USER": self.state.db_user,
            "ROOT_DB_PASSWORD": self.state.db_password,
            "KEYSTONE_DB_PASSWORD": config["keystone_db_password"],
            "REGION_ID": config["region_id"],
            "KEYSTONE_HOST": self.app.name,
            "ADMIN_USERNAME": config["admin_username"],
            "ADMIN_PASSWORD": config["admin_password"],
            "ADMIN_PROJECT": config["admin_project"],
            "SERVICE_USERNAME": config["service_username"],
            "SERVICE_PASSWORD": config["service_password"],
            "SERVICE_PROJECT": config["service_project"],
        }

    def _make_pod_ingress_resources(self):
        """Build the ingress resource list for the pod spec.

        Returns:
            list | None: a single-element list with the ingress resource, or
            ``None`` when ``site_url`` is unset or not an http(s) URL.
        """
        site_url = self.model.config["site_url"]

        if not site_url:
            return

        parsed = urlparse(site_url)

        # Only http/https URLs can be served through an ingress.
        if not parsed.scheme.startswith("http"):
            return

        max_file_size = self.model.config["max_file_size"]
        ingress_whitelist_source_range = self.model.config[
            "ingress_whitelist_source_range"
        ]

        annotations = {
            "nginx.ingress.kubernetes.io/proxy-body-size": "{}m".format(max_file_size)
        }

        if ingress_whitelist_source_range:
            annotations[
                "nginx.ingress.kubernetes.io/whitelist-source-range"
            ] = ingress_whitelist_source_range

        ingress_spec_tls = None

        if parsed.scheme == "https":
            # TLS termination at the ingress; the secret name is optional.
            ingress_spec_tls = [{"hosts": [parsed.hostname]}]
            tls_secret_name = self.model.config["tls_secret_name"]
            if tls_secret_name:
                ingress_spec_tls[0]["secretName"] = tls_secret_name
        else:
            # Plain http: disable the default http->https redirect.
            annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"

        ingress = {
            "name": "{}-ingress".format(self.app.name),
            "annotations": annotations,
            "spec": {
                "rules": [
                    {
                        "host": parsed.hostname,
                        "http": {
                            "paths": [
                                {
                                    "path": "/",
                                    "backend": {
                                        "serviceName": self.app.name,
                                        "servicePort": KEYSTONE_PORT,
                                    },
                                }
                            ]
                        },
                    }
                ],
            },
        }
        if ingress_spec_tls:
            ingress["spec"]["tls"] = ingress_spec_tls

        return [ingress]

    def configure_pod(self, event):
        """Assemble the pod spec and apply it, if possible.

        Waits for the db relation, lets non-leader units go active
        immediately (only the leader may set the pod spec), and blocks on
        missing required settings.
        """
        if not self.state.db_host:
            self.unit.status = WaitingStatus("Waiting for database relation")
            event.defer()
            return

        if not self.unit.is_leader():
            self.unit.status = ActiveStatus()
            return

        # Check problems in the settings
        problems = self._check_settings()
        if problems:
            self.unit.status = BlockedStatus(problems)
            return

        # Transient working state: Maintenance, not Blocked (Blocked is
        # reserved for conditions needing operator intervention).
        self.unit.status = MaintenanceStatus("Assembling pod spec")
        image_details = self._make_pod_image_details()
        ports = self._make_pod_ports()
        env_config = self._make_pod_envconfig()
        ingress_resources = self._make_pod_ingress_resources()

        pod_spec = {
            "version": 3,
            "containers": [
                {
                    "name": self.app.name,
                    "imageDetails": image_details,
                    "ports": ports,
                    "envConfig": env_config,
                }
            ],
            "kubernetesResources": {"ingressResources": ingress_resources or []},
        }
        self.model.pod.set_spec(pod_spec)
        self.unit.status = ActiveStatus()
| |
| |
if __name__ == "__main__":
    # Entry point: hand control to the Operator Framework's event loop.
    main(KeystoneCharm)