+
+# LOGGER = logging.getLogger(__name__)
+
+
+# class ConfigurePodEvent(EventBase):
+# """Configure Pod event"""
+
+# pass
+
+
+# class KeystoneEvents(CharmEvents):
+# """Keystone Events"""
+
+# configure_pod = EventSource(ConfigurePodEvent)
+
+# class KeystoneCharm(CharmBase):
+# """Keystone K8s Charm"""
+
+# state = StoredState()
+# on = KeystoneEvents()
+
+# def __init__(self, *args) -> NoReturn:
+# """Constructor of the Charm object.
+# Initializes internal state and register events it can handle.
+# """
+# super().__init__(*args)
+# self.state.set_default(db_host=None)
+# self.state.set_default(db_port=None)
+# self.state.set_default(db_user=None)
+# self.state.set_default(db_password=None)
+# self.state.set_default(pod_spec=None)
+# self.state.set_default(fernet_keys=None)
+# self.state.set_default(credential_keys=None)
+# self.state.set_default(keys_timestamp=0)
+
+# # Register all of the events we want to observe
+# self.framework.observe(self.on.config_changed, self.configure_pod)
+# self.framework.observe(self.on.start, self.configure_pod)
+# self.framework.observe(self.on.upgrade_charm, self.configure_pod)
+# self.framework.observe(self.on.leader_elected, self.configure_pod)
+# self.framework.observe(self.on.update_status, self.configure_pod)
+
+# # Registering custom internal events
+# self.framework.observe(self.on.configure_pod, self.configure_pod)
+
+# # Register relation events
+# self.framework.observe(
+# self.on.db_relation_changed, self._on_db_relation_changed
+# )
+# self.framework.observe(
+# self.on.db_relation_broken, self._on_db_relation_broken
+# )
+# self.framework.observe(
+# self.on.keystone_relation_joined, self._publish_keystone_info
+# )
+
+# def _publish_keystone_info(self, event: EventBase) -> NoReturn:
+# """Publishes keystone information for NBI usage through the keystone
+# relation.
+
+# Args:
+# event (EventBase): Keystone relation event to update NBI.
+# """
+# config = self.model.config
+# rel_data = {
+# "host": f"http://{self.app.name}:{KEYSTONE_PORT}/v3",
+# "port": str(KEYSTONE_PORT),
+# "keystone_db_password": config["keystone_db_password"],
+# "region_id": config["region_id"],
+# "user_domain_name": config["user_domain_name"],
+# "project_domain_name": config["project_domain_name"],
+# "admin_username": config["admin_username"],
+# "admin_password": config["admin_password"],
+# "admin_project_name": config["admin_project"],
+# "username": config["service_username"],
+# "password": config["service_password"],
+# "service": config["service_project"],
+# }
+# for k, v in rel_data.items():
+# event.relation.data[self.model.unit][k] = v
+
+# def _on_db_relation_changed(self, event: EventBase) -> NoReturn:
+# """Reads information about the DB relation, in order for keystone to
+# access it.
+
+# Args:
+# event (EventBase): DB relation event to access database
+# information.
+# """
+# if not event.unit in event.relation.data:
+# return
+# relation_data = event.relation.data[event.unit]
+# db_host = relation_data.get("host")
+# db_port = int(relation_data.get("port", 3306))
+# db_user = "root"
+# db_password = relation_data.get("root_password")
+
+# if (
+# db_host
+# and db_port
+# and db_user
+# and db_password
+# and (
+# self.state.db_host != db_host
+# or self.state.db_port != db_port
+# or self.state.db_user != db_user
+# or self.state.db_password != db_password
+# )
+# ):
+# self.state.db_host = db_host
+# self.state.db_port = db_port
+# self.state.db_user = db_user
+# self.state.db_password = db_password
+# self.on.configure_pod.emit()
+
+
+# def _on_db_relation_broken(self, event: EventBase) -> NoReturn:
+# """Clears data from db relation.
+
+# Args:
+# event (EventBase): DB relation event.
+
+# """
+# self.state.db_host = None
+# self.state.db_port = None
+# self.state.db_user = None
+# self.state.db_password = None
+# self.on.configure_pod.emit()
+
+# def _check_settings(self) -> str:
+# """Check if there are any settings missing from Keystone configuration.
+
+# Returns:
+# str: Information about the problems found (if any).
+# """
+# problems = []
+# config = self.model.config
+
+# for setting in REQUIRED_SETTINGS:
+# if not config.get(setting):
+# problem = f"missing config {setting}"
+# problems.append(problem)
+
+# return ";".join(problems)
+
+# def _make_pod_image_details(self) -> Dict[str, str]:
+# """Generate the pod image details.
+
+# Returns:
+# Dict[str, str]: pod image details.
+# """
+# config = self.model.config
+# image_details = {
+# "imagePath": config["image"],
+# }
+# if config["image_username"]:
+# image_details.update(
+# {
+# "username": config["image_username"],
+# "password": config["image_password"],
+# }
+# )
+# return image_details
+
+# def _make_pod_ports(self) -> List[Dict[str, Any]]:
+# """Generate the pod ports details.
+
+# Returns:
+# List[Dict[str, Any]]: pod ports details.
+# """
+# return [
+# {"name": "keystone", "containerPort": KEYSTONE_PORT, "protocol": "TCP"},
+# ]
+
+# def _make_pod_envconfig(self) -> Dict[str, Any]:
+# """Generate pod environment configuration.
+
+# Returns:
+# Dict[str, Any]: pod environment configuration.
+# """
+# config = self.model.config
+
+# envconfig = {
+# "DB_HOST": self.state.db_host,
+# "DB_PORT": self.state.db_port,
+# "ROOT_DB_USER": self.state.db_user,
+# "ROOT_DB_PASSWORD": self.state.db_password,
+# "KEYSTONE_DB_PASSWORD": config["keystone_db_password"],
+# "REGION_ID": config["region_id"],
+# "KEYSTONE_HOST": self.app.name,
+# "ADMIN_USERNAME": config["admin_username"],
+# "ADMIN_PASSWORD": config["admin_password"],
+# "ADMIN_PROJECT": config["admin_project"],
+# "SERVICE_USERNAME": config["service_username"],
+# "SERVICE_PASSWORD": config["service_password"],
+# "SERVICE_PROJECT": config["service_project"],
+# }
+
+# if config.get("ldap_enabled"):
+# envconfig["LDAP_AUTHENTICATION_DOMAIN_NAME"] = config[
+# "ldap_authentication_domain_name"
+# ]
+# envconfig["LDAP_URL"] = config["ldap_url"]
+# envconfig["LDAP_PAGE_SIZE"] = config["ldap_page_size"]
+# envconfig["LDAP_USER_OBJECTCLASS"] = config["ldap_user_objectclass"]
+# envconfig["LDAP_USER_ID_ATTRIBUTE"] = config["ldap_user_id_attribute"]
+# envconfig["LDAP_USER_NAME_ATTRIBUTE"] = config["ldap_user_name_attribute"]
+# envconfig["LDAP_USER_PASS_ATTRIBUTE"] = config["ldap_user_pass_attribute"]
+# envconfig["LDAP_USER_ENABLED_MASK"] = config["ldap_user_enabled_mask"]
+# envconfig["LDAP_USER_ENABLED_DEFAULT"] = config["ldap_user_enabled_default"]
+# envconfig["LDAP_USER_ENABLED_INVERT"] = config["ldap_user_enabled_invert"]
+# envconfig["LDAP_GROUP_OBJECTCLASS"] = config["ldap_group_objectclass"]
+
+# if config["ldap_bind_user"]:
+# envconfig["LDAP_BIND_USER"] = config["ldap_bind_user"]
+
+# if config["ldap_bind_password"]:
+# envconfig["LDAP_BIND_PASSWORD"] = config["ldap_bind_password"]
+
+# if config["ldap_user_tree_dn"]:
+# envconfig["LDAP_USER_TREE_DN"] = config["ldap_user_tree_dn"]
+
+# if config["ldap_user_filter"]:
+# envconfig["LDAP_USER_FILTER"] = config["ldap_user_filter"]
+
+# if config["ldap_user_enabled_attribute"]:
+# envconfig["LDAP_USER_ENABLED_ATTRIBUTE"] = config[
+# "ldap_user_enabled_attribute"
+# ]
+
+# if config["ldap_chase_referrals"]:
+# envconfig["LDAP_CHASE_REFERRALS"] = config["ldap_chase_referrals"]
+
+# if config["ldap_group_tree_dn"]:
+# envconfig["LDAP_GROUP_TREE_DN"] = config["ldap_group_tree_dn"]
+
+# if config["ldap_use_starttls"]:
+# envconfig["LDAP_USE_STARTTLS"] = config["ldap_use_starttls"]
+# envconfig["LDAP_TLS_CACERT_BASE64"] = config["ldap_tls_cacert_base64"]
+# envconfig["LDAP_TLS_REQ_CERT"] = config["ldap_tls_req_cert"]
+
+# return envconfig
+
+# def _make_pod_ingress_resources(self) -> List[Dict[str, Any]]:
+# """Generate pod ingress resources.
+
+# Returns:
+# List[Dict[str, Any]]: pod ingress resources.
+# """
+# site_url = self.model.config["site_url"]
+
+# if not site_url:
+# return
+
+# parsed = urlparse(site_url)
+
+# if not parsed.scheme.startswith("http"):
+# return
+
+# max_file_size = self.model.config["max_file_size"]
+# ingress_whitelist_source_range = self.model.config[
+# "ingress_whitelist_source_range"
+# ]
+
+# annotations = {
+# "nginx.ingress.kubernetes.io/proxy-body-size": "{}m".format(max_file_size)
+# }
+
+# if ingress_whitelist_source_range:
+# annotations[
+# "nginx.ingress.kubernetes.io/whitelist-source-range"
+# ] = ingress_whitelist_source_range
+
+# ingress_spec_tls = None
+
+# if parsed.scheme == "https":
+# ingress_spec_tls = [{"hosts": [parsed.hostname]}]
+# tls_secret_name = self.model.config["tls_secret_name"]
+# if tls_secret_name:
+# ingress_spec_tls[0]["secretName"] = tls_secret_name
+# else:
+# annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
+
+# ingress = {
+# "name": "{}-ingress".format(self.app.name),
+# "annotations": annotations,
+# "spec": {
+# "rules": [
+# {
+# "host": parsed.hostname,
+# "http": {
+# "paths": [
+# {
+# "path": "/",
+# "backend": {
+# "serviceName": self.app.name,
+# "servicePort": KEYSTONE_PORT,
+# },
+# }
+# ]
+# },
+# }
+# ],
+# },
+# }
+# if ingress_spec_tls:
+# ingress["spec"]["tls"] = ingress_spec_tls
+
+# return [ingress]
+
+# def _generate_keys(self) -> Tuple[List[str], List[str]]:
+# """Generate new fernet keys.
+
+# Returns:
+# Tuple[List[str], List[str]]: contains two lists of strings. First
+# list contains strings that represent
+# the keys for fernet and the second
+# list contains strings that represent
+# the keys for credentials.
+# """
+# fernet_keys = [
+# Fernet.generate_key().decode() for _ in range(NUMBER_FERNET_KEYS)
+# ]
+# credential_keys = [
+# Fernet.generate_key().decode() for _ in range(NUMBER_CREDENTIAL_KEYS)
+# ]
+
+# return (fernet_keys, credential_keys)
+
+# def configure_pod(self, event: EventBase) -> NoReturn:
+# """Assemble the pod spec and apply it, if possible.
+
+# Args:
+# event (EventBase): Hook or Relation event that started the
+# function.
+# """
+# if not self.state.db_host:
+# self.unit.status = WaitingStatus("Waiting for database relation")
+# event.defer()
+# return
+
+# if not self.unit.is_leader():
+# self.unit.status = ActiveStatus("ready")
+# return
+
+# if fernet_keys := self.state.fernet_keys:
+# fernet_keys = json.loads(fernet_keys)
+
+# if credential_keys := self.state.credential_keys:
+# credential_keys = json.loads(credential_keys)
+
+# now = datetime.now().timestamp()
+# keys_timestamp = self.state.keys_timestamp
+# token_expiration = self.model.config["token_expiration"]
+
+# valid_keys = (now - keys_timestamp) < token_expiration
+# if not credential_keys or not fernet_keys or not valid_keys:
+# fernet_keys, credential_keys = self._generate_keys()
+# self.state.fernet_keys = json.dumps(fernet_keys)
+# self.state.credential_keys = json.dumps(credential_keys)
+# self.state.keys_timestamp = now
+
+# # Check problems in the settings
+# problems = self._check_settings()
+# if problems:
+# self.unit.status = BlockedStatus(problems)
+# return
+
+# self.unit.status = BlockedStatus("Assembling pod spec")
+# image_details = self._make_pod_image_details()
+# ports = self._make_pod_ports()
+# env_config = self._make_pod_envconfig()
+# ingress_resources = self._make_pod_ingress_resources()
+# files = self._make_pod_files(fernet_keys, credential_keys)
+
+# pod_spec = {
+# "version": 3,
+# "containers": [
+# {
+# "name": self.framework.model.app.name,
+# "imageDetails": image_details,
+# "ports": ports,
+# "envConfig": env_config,
+# "volumeConfig": files,
+# }
+# ],
+# "kubernetesResources": {"ingressResources": ingress_resources or []},
+# }
+
+# if self.state.pod_spec != (
+# pod_spec_json := json.dumps(pod_spec, sort_keys=True)
+# ):
+# self.state.pod_spec = pod_spec_json
+# self.model.pod.set_spec(pod_spec)
+
+# self.unit.status = ActiveStatus("ready")
+
+
+# if __name__ == "__main__":
+# main(KeystoneCharm)