X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=installers%2Fcharm%2Fkafka-exporter%2Fsrc%2Fcharm.py;h=123fa0b2632f9dd203b5b8151270de9f0948f4b1;hb=1072160c684a35076746d7b789bb0040643d3a20;hp=9f03a343a681dbb80eae69e98f3bd24959555e88;hpb=6332d381ac94fcee447d24fa33c15997c21bbf79;p=osm%2Fdevops.git

diff --git a/installers/charm/kafka-exporter/src/charm.py b/installers/charm/kafka-exporter/src/charm.py
index 9f03a343..123fa0b2 100755
--- a/installers/charm/kafka-exporter/src/charm.py
+++ b/installers/charm/kafka-exporter/src/charm.py
@@ -20,203 +20,207 @@
 # osm-charmers@lists.launchpad.net
 ##
+# pylint: disable=E0213
+
+from ipaddress import ip_network
 import logging
 from pathlib import Path
-from typing import Dict, List, NoReturn
+from typing import NoReturn, Optional
 from urllib.parse import urlparse
 
-from ops.charm import CharmBase
-from ops.framework import EventBase, StoredState
 from ops.main import main
-from ops.model import ActiveStatus, Application, BlockedStatus, MaintenanceStatus, Unit
-from oci_image import OCIImageResource, OCIImageResourceError
+from opslib.osm.charm import CharmedOsmBase, RelationsMissing
+from opslib.osm.interfaces.grafana import GrafanaDashboardTarget
+from opslib.osm.interfaces.kafka import KafkaClient
+from opslib.osm.interfaces.prometheus import PrometheusScrapeTarget
+from opslib.osm.pod import (
+    ContainerV3Builder,
+    IngressResourceV3Builder,
+    PodSpecV3Builder,
+)
+from opslib.osm.validator import ModelValidator, validator
 
-from pod_spec import make_pod_spec
 
 logger = logging.getLogger(__name__)
 
-KAFKA_EXPORTER_PORT = 9308
-
+PORT = 9308
 
 
-class RelationsMissing(Exception):
-    def __init__(self, missing_relations: List):
-        self.message = ""
-        if missing_relations and isinstance(missing_relations, list):
-            self.message += f'Waiting for {", ".join(missing_relations)} relation'
-        if "," in self.message:
-            self.message += "s"
 
+class ConfigModel(ModelValidator):
+    site_url: Optional[str]
+    cluster_issuer: Optional[str]
+    ingress_whitelist_source_range: Optional[str]
+    tls_secret_name: Optional[str]
 
-class RelationDefinition:
-    def __init__(self, relation_name: str, keys: List, source_type):
-        if source_type != Application and source_type != Unit:
-            raise TypeError(
-                "source_type should be ops.model.Application or ops.model.Unit"
-            )
-        self.relation_name = relation_name
-        self.keys = keys
-        self.source_type = source_type
-
-
-def check_missing_relation_data(
-    data: Dict,
-    expected_relations_data: List[RelationDefinition],
-):
-    missing_relations = []
-    for relation_data in expected_relations_data:
-        if not all(
-            f"{relation_data.relation_name}_{k}" in data for k in relation_data.keys
-        ):
-            missing_relations.append(relation_data.relation_name)
-    if missing_relations:
-        raise RelationsMissing(missing_relations)
-
-
-def get_relation_data(
-    charm: CharmBase,
-    relation_data: RelationDefinition,
-) -> Dict:
-    data = {}
-    relation = charm.model.get_relation(relation_data.relation_name)
-    if relation:
-        self_app_unit = (
-            charm.app if relation_data.source_type == Application else charm.unit
-        )
-        expected_type = relation_data.source_type
-        for app_unit in relation.data:
-            if app_unit != self_app_unit and isinstance(app_unit, expected_type):
-                if all(k in relation.data[app_unit] for k in relation_data.keys):
-                    for k in relation_data.keys:
-                        data[f"{relation_data.relation_name}_{k}"] = relation.data[
-                            app_unit
-                        ].get(k)
-                    break
-    return data
-
+    @validator("site_url")
+    def validate_site_url(cls, v):
+        if v:
+            parsed = urlparse(v)
+            if not parsed.scheme.startswith("http"):
+                raise ValueError("value must start with http")
+        return v
 
-class KafkaExporterCharm(CharmBase):
-    """Kafka Exporter Charm."""
+    @validator("ingress_whitelist_source_range")
+    def validate_ingress_whitelist_source_range(cls, v):
+        if v:
+            ip_network(v)
+        return v
 
-    state = StoredState()
 
+class KafkaExporterCharm(CharmedOsmBase):
     def __init__(self, *args) -> NoReturn:
-        """Kafka Exporter Charm constructor."""
-        super().__init__(*args)
-
-        # Internal state initialization
-        self.state.set_default(pod_spec=None)
-
-        self.port = KAFKA_EXPORTER_PORT
-        self.image = OCIImageResource(self, "image")
-
-        # Registering regular events
-        self.framework.observe(self.on.start, self.configure_pod)
-        self.framework.observe(self.on.config_changed, self.configure_pod)
+        super().__init__(*args, oci_image="image")
 
-        # Registering required relation events
-        self.framework.observe(self.on.kafka_relation_changed, self.configure_pod)
+        # Provision Kafka relation to exchange information
+        self.kafka_client = KafkaClient(self, "kafka")
+        self.framework.observe(self.on["kafka"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["kafka"].relation_broken, self.configure_pod)
 
-        # Registering required relation departed events
-        self.framework.observe(self.on.kafka_relation_departed, self.configure_pod)
-
-        # Registering provided relation events
+        # Register relation to provide a Scraping Target
+        self.scrape_target = PrometheusScrapeTarget(self, "prometheus-scrape")
         self.framework.observe(
-            self.on.prometheus_scrape_relation_joined, self._publish_scrape_info
+            self.on["prometheus-scrape"].relation_joined, self._publish_scrape_info
         )
+
+        # Register relation to provide a Dashboard Target
+        self.dashboard_target = GrafanaDashboardTarget(self, "grafana-dashboard")
         self.framework.observe(
-            self.on.grafana_dashboard_relation_joined, self._publish_dashboard_info
+            self.on["grafana-dashboard"].relation_joined, self._publish_dashboard_info
         )
 
-    def _publish_scrape_info(self, event: EventBase) -> NoReturn:
-        """Publishes scrape information.
+    def _publish_scrape_info(self, event) -> NoReturn:
+        """Publishes scraping information for Prometheus.
+
+        Args:
+            event (EventBase): Prometheus relation event.
+        """
+        if self.unit.is_leader():
+            hostname = (
+                urlparse(self.model.config["site_url"]).hostname
+                if self.model.config["site_url"]
+                else self.model.app.name
+            )
+            port = str(PORT)
+            if self.model.config.get("site_url", "").startswith("https://"):
+                port = "443"
+            elif self.model.config.get("site_url", "").startswith("http://"):
+                port = "80"
+
+            self.scrape_target.publish_info(
+                hostname=hostname,
+                port=port,
+                metrics_path="/metrics",
+                scrape_interval="30s",
+                scrape_timeout="15s",
+            )
+
+    def _publish_dashboard_info(self, event) -> NoReturn:
+        """Publish dashboards for Grafana.
 
         Args:
-            event (EventBase): Exporter relation event.
+            event (EventBase): Grafana relation event.
         """
-        rel_data = {
-            "hostname": urlparse(self.model.config["site_url"]).hostname
-            if self.model.config["site_url"]
-            else self.model.app.name,
-            "port": "80" if self.model.config["site_url"] else str(KAFKA_EXPORTER_PORT),
-            "metrics_path": "/metrics",
-            "scrape_interval": "30s",
-            "scrape_timeout": "15s",
-        }
-        for k, v in rel_data.items():
-            event.relation.data[self.unit][k] = v
-
-    def _publish_dashboard_info(self, event: EventBase) -> NoReturn:
-        """Publishes dashboard information.
+        if self.unit.is_leader():
+            self.dashboard_target.publish_info(
+                name="osm-kafka",
+                dashboard=Path("files/kafka_exporter_dashboard.json").read_text(),
+            )
+
+    def _check_missing_dependencies(self, config: ConfigModel):
+        """Check if there is any relation missing.
 
         Args:
-            event (EventBase): Exporter relation event.
+            config (ConfigModel): object with configuration information.
+
+        Raises:
+            RelationsMissing: if kafka is missing.
         """
-        rel_data = {
-            "name": "osm-kafka",
-            "dashboard": Path("files/kafka_exporter_dashboard.json").read_text(),
-        }
-        for k, v in rel_data.items():
-            event.relation.data[self.unit][k] = v
-
-    @property
-    def relations_requirements(self):
-        return [RelationDefinition("kafka", ["host", "port"], Unit)]
-
-    def get_relation_state(self):
-        relation_state = {}
-        for relation_requirements in self.relations_requirements:
-            data = get_relation_data(self, relation_requirements)
-            relation_state = {**relation_state, **data}
-        check_missing_relation_data(relation_state, self.relations_requirements)
-        return relation_state
-
-    def configure_pod(self, _=None) -> NoReturn:
-        """Assemble the pod spec and apply it, if possible.
+        missing_relations = []
+
+        if self.kafka_client.is_missing_data_in_unit():
+            missing_relations.append("kafka")
+
+        if missing_relations:
+            raise RelationsMissing(missing_relations)
+
+    def build_pod_spec(self, image_info):
+        """Build the PodSpec to be used.
 
         Args:
-            event (EventBase): Hook or Relation event that started the
-            function.
+            image_info (str): container image information.
+
+        Returns:
+            Dict: PodSpec information.
         """
-        if not self.unit.is_leader():
-            self.unit.status = ActiveStatus("ready")
-            return
-
-        relation_state = None
-        try:
-            relation_state = self.get_relation_state()
-        except RelationsMissing as exc:
-            logger.exception("Relation missing error")
-            self.unit.status = BlockedStatus(exc.message)
-            return
-
-        self.unit.status = MaintenanceStatus("Assembling pod spec")
-
-        # Fetch image information
-        try:
-            self.unit.status = MaintenanceStatus("Fetching image information")
-            image_info = self.image.fetch()
-        except OCIImageResourceError:
-            self.unit.status = BlockedStatus("Error fetching image information")
-            return
-
-        try:
-            pod_spec = make_pod_spec(
-                image_info,
-                self.model.config,
-                relation_state,
-                self.model.app.name,
-                self.port,
+        # Validate config
+        config = ConfigModel(**dict(self.config))
+
+        # Check relations
+        self._check_missing_dependencies(config)
+
+        # Create Builder for the PodSpec
+        pod_spec_builder = PodSpecV3Builder()
+
+        # Build container
+        container_builder = ContainerV3Builder(self.app.name, image_info)
+        container_builder.add_port(name=self.app.name, port=PORT)
+        container_builder.add_http_readiness_probe(
+            path="/api/health",
+            port=PORT,
+            initial_delay_seconds=10,
+            period_seconds=10,
+            timeout_seconds=5,
+            success_threshold=1,
+            failure_threshold=3,
+        )
+        container_builder.add_http_liveness_probe(
+            path="/api/health",
+            port=PORT,
+            initial_delay_seconds=60,
+            timeout_seconds=30,
+            failure_threshold=10,
+        )
+        container_builder.add_command(
+            [
+                "kafka_exporter",
+                f"--kafka.server={self.kafka_client.host}:{self.kafka_client.port}",
+            ]
+        )
+        container = container_builder.build()
+
+        # Add container to PodSpec
+        pod_spec_builder.add_container(container)
+
+        # Add ingress resources to PodSpec if site url exists
+        if config.site_url:
+            parsed = urlparse(config.site_url)
+            annotations = {}
+            ingress_resource_builder = IngressResourceV3Builder(
+                f"{self.app.name}-ingress", annotations
             )
-        except ValueError as exc:
-            logger.exception("Config/Relation data validation error")
-            self.unit.status = BlockedStatus(str(exc))
-            return
-        if self.state.pod_spec != pod_spec:
-            self.model.pod.set_spec(pod_spec)
-            self.state.pod_spec = pod_spec
+            if config.ingress_whitelist_source_range:
+                annotations[
+                    "nginx.ingress.kubernetes.io/whitelist-source-range"
+                ] = config.ingress_whitelist_source_range
+
+            if config.cluster_issuer:
+                annotations["cert-manager.io/cluster-issuer"] = config.cluster_issuer
+
+            if parsed.scheme == "https":
+                ingress_resource_builder.add_tls(
+                    [parsed.hostname], config.tls_secret_name
+                )
+            else:
+                annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
+
+            ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT)
+            ingress_resource = ingress_resource_builder.build()
+            pod_spec_builder.add_ingress_resource(ingress_resource)
+
+        logger.debug(pod_spec_builder.build())
 
-        self.unit.status = ActiveStatus("ready")
+        return pod_spec_builder.build()
 
 
 if __name__ == "__main__":