Update exporters: migration to CharmHub
[osm/devops.git] / installers / charm / kafka-exporter / src / charm.py
1 #!/usr/bin/env python3
2 # Copyright 2021 Canonical Ltd.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
14 # under the License.
15 #
16 # For those usages not covered by the Apache License, Version 2.0 please
17 # contact: legal@canonical.com
18 #
19 # To get in touch with the maintainers, please contact:
20 # osm-charmers@lists.launchpad.net
21 ##
22
23 # pylint: disable=E0213
24
25 from ipaddress import ip_network
26 import logging
27 from pathlib import Path
28 from typing import NoReturn, Optional
29 from urllib.parse import urlparse
30
31 from charms.kafka_k8s.v0.kafka import KafkaEvents, KafkaRequires
32 from ops.main import main
33 from opslib.osm.charm import CharmedOsmBase, RelationsMissing
34 from opslib.osm.interfaces.grafana import GrafanaDashboardTarget
35 from opslib.osm.interfaces.prometheus import PrometheusScrapeTarget
36 from opslib.osm.pod import (
37 ContainerV3Builder,
38 IngressResourceV3Builder,
39 PodSpecV3Builder,
40 )
41 from opslib.osm.validator import ModelValidator, validator
42
43
44 logger = logging.getLogger(__name__)
45
46 PORT = 9308
47
48
class ConfigModel(ModelValidator):
    """Validated configuration options for the kafka-exporter charm."""

    site_url: Optional[str]
    cluster_issuer: Optional[str]
    ingress_class: Optional[str]
    ingress_whitelist_source_range: Optional[str]
    tls_secret_name: Optional[str]
    image_pull_policy: str
    security_context: bool
    kafka_endpoint: Optional[str]

    @validator("site_url")
    def validate_site_url(cls, v):
        """Ensure site_url, when set, uses an http or https scheme.

        Raises:
            ValueError: if the URL scheme is not http(s).
        """
        if v:
            parsed = urlparse(v)
            if not parsed.scheme.startswith("http"):
                raise ValueError("value must start with http")
        return v

    @validator("ingress_whitelist_source_range")
    def validate_ingress_whitelist_source_range(cls, v):
        """Ensure the whitelist source range is a valid CIDR network.

        Raises:
            ValueError: propagated from ip_network() on an invalid CIDR.
        """
        if v:
            # ip_network raises ValueError for malformed network strings.
            ip_network(v)
        return v

    @validator("image_pull_policy")
    def validate_image_pull_policy(cls, v):
        """Normalize the pull policy to the exact Kubernetes spelling.

        Accepts the value case-insensitively and maps it to one of
        "Always", "IfNotPresent" or "Never".

        Raises:
            ValueError: if the value is not one of the accepted policies.
        """
        values = {
            "always": "Always",
            "ifnotpresent": "IfNotPresent",
            "never": "Never",
        }
        v = v.lower()
        # Membership test on a dict checks its keys directly; no .keys() needed.
        if v not in values:
            raise ValueError("value must be always, ifnotpresent or never")
        return values[v]

    @validator("kafka_endpoint")
    def validate_kafka_endpoint(cls, v):
        """Ensure an externally supplied Kafka endpoint looks like <host>:<port>.

        Raises:
            ValueError: if the value does not contain exactly one colon.
        """
        if v and len(v.split(":")) != 2:
            raise ValueError("value must be in the format <host>:<port>")
        return v
90
91
class KafkaEndpoint:
    """Simple value object holding a Kafka broker host/port pair."""

    def __init__(self, host: str, port: str) -> None:
        # Stored as given; port is kept as a string for URL assembly.
        self.host, self.port = host, port
97
class KafkaExporterCharm(CharmedOsmBase):
    """Charm deploying the kafka_exporter, exposing Prometheus metrics for Kafka."""

    on = KafkaEvents()

    def __init__(self, *args) -> None:
        super().__init__(*args, oci_image="image")

        # Provision Kafka relation to exchange information
        self.kafka = KafkaRequires(self)
        self.framework.observe(self.on.kafka_available, self.configure_pod)
        self.framework.observe(self.on.kafka_broken, self.configure_pod)

        # Register relation to provide a Scraping Target
        self.scrape_target = PrometheusScrapeTarget(self, "prometheus-scrape")
        self.framework.observe(
            self.on["prometheus-scrape"].relation_joined, self._publish_scrape_info
        )

        # Register relation to provide a Dashboard Target
        self.dashboard_target = GrafanaDashboardTarget(self, "grafana-dashboard")
        self.framework.observe(
            self.on["grafana-dashboard"].relation_joined, self._publish_dashboard_info
        )

    def _publish_scrape_info(self, event) -> None:
        """Publishes scraping information for Prometheus.

        When site_url is configured, the exporter is reached through the
        ingress, so the advertised port becomes 443/80 by scheme; otherwise
        the in-cluster service name and exporter port are advertised.

        Args:
            event (EventBase): Prometheus relation event.
        """
        if self.unit.is_leader():
            hostname = (
                urlparse(self.model.config["site_url"]).hostname
                if self.model.config["site_url"]
                else self.model.app.name
            )
            port = str(PORT)
            # Guard with `or ""`: an unset Optional option may be None,
            # and None.startswith(...) would raise.
            site_url = self.model.config.get("site_url") or ""
            if site_url.startswith("https://"):
                port = "443"
            elif site_url.startswith("http://"):
                port = "80"

            self.scrape_target.publish_info(
                hostname=hostname,
                port=port,
                metrics_path="/metrics",
                scrape_interval="30s",
                scrape_timeout="15s",
            )

    def _publish_dashboard_info(self, event) -> None:
        """Publish dashboards for Grafana.

        Args:
            event (EventBase): Grafana relation event.
        """
        if self.unit.is_leader():
            self.dashboard_target.publish_info(
                name="osm-kafka",
                dashboard=Path("templates/kafka_exporter_dashboard.json").read_text(),
            )

    def _is_kafka_endpoint_set(self, config: ConfigModel) -> bool:
        """Check if a Kafka endpoint is available via config or relation."""
        # bool(...) keeps the declared return type honest: kafka_endpoint
        # is a string, and `str or bool` would otherwise leak a str out.
        return bool(config.kafka_endpoint) or self._is_kafka_relation_set()

    def _is_kafka_relation_set(self) -> bool:
        """Check if the Kafka relation is set or not."""
        return self.kafka.host and self.kafka.port

    @property
    def kafka_endpoint(self) -> KafkaEndpoint:
        """Resolve the Kafka endpoint, preferring explicit config over relation data."""
        config = ConfigModel(**dict(self.config))
        if config.kafka_endpoint:
            host, port = config.kafka_endpoint.split(":")
        else:
            host = self.kafka.host
            port = self.kafka.port
        return KafkaEndpoint(host, port)

    def build_pod_spec(self, image_info):
        """Build the PodSpec to be used.

        Args:
            image_info (str): container image information.

        Returns:
            Dict: PodSpec information.

        Raises:
            RelationsMissing: if neither the kafka_endpoint config option
                nor the kafka relation provides a broker address.
        """
        # Validate config
        config = ConfigModel(**dict(self.config))

        # Check relations
        if not self._is_kafka_endpoint_set(config):
            raise RelationsMissing(["kafka"])

        # Create Builder for the PodSpec
        pod_spec_builder = PodSpecV3Builder(
            enable_security_context=config.security_context
        )

        # Build container
        container_builder = ContainerV3Builder(
            self.app.name,
            image_info,
            config.image_pull_policy,
            run_as_non_root=config.security_context,
        )
        container_builder.add_port(name="exporter", port=PORT)
        container_builder.add_http_readiness_probe(
            path="/api/health",
            port=PORT,
            initial_delay_seconds=10,
            period_seconds=10,
            timeout_seconds=5,
            success_threshold=1,
            failure_threshold=3,
        )
        container_builder.add_http_liveness_probe(
            path="/api/health",
            port=PORT,
            initial_delay_seconds=60,
            timeout_seconds=30,
            failure_threshold=10,
        )
        container_builder.add_command(
            [
                "kafka_exporter",
                f"--kafka.server={self.kafka_endpoint.host}:{self.kafka_endpoint.port}",
            ]
        )
        container = container_builder.build()

        # Add container to PodSpec
        pod_spec_builder.add_container(container)

        # Add ingress resources to PodSpec if site url exists
        if config.site_url:
            parsed = urlparse(config.site_url)
            annotations = {}
            if config.ingress_class:
                annotations["kubernetes.io/ingress.class"] = config.ingress_class
            ingress_resource_builder = IngressResourceV3Builder(
                f"{self.app.name}-ingress", annotations
            )

            if config.ingress_whitelist_source_range:
                annotations[
                    "nginx.ingress.kubernetes.io/whitelist-source-range"
                ] = config.ingress_whitelist_source_range

            if config.cluster_issuer:
                annotations["cert-manager.io/cluster-issuer"] = config.cluster_issuer

            if parsed.scheme == "https":
                ingress_resource_builder.add_tls(
                    [parsed.hostname], config.tls_secret_name
                )
            else:
                # Plain-http ingress: don't force a redirect to https.
                annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"

            ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT)
            ingress_resource = ingress_resource_builder.build()
            pod_spec_builder.add_ingress_resource(ingress_resource)

        return pod_spec_builder.build()
264
265
if __name__ == "__main__":
    # Hand control to the Operator Framework's event dispatch loop.
    main(KafkaExporterCharm)