Fix bug 1547: Add ingress.class annotation to OSM charms
osm/devops.git: installers/charm/nbi/src/charm.py
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact: legal@canonical.com
#
# To get in touch with the maintainers, please contact:
# osm-charmers@lists.launchpad.net
##

# pylint: disable=E0213

from ipaddress import ip_network
import logging
from typing import Optional
from urllib.parse import urlparse


from ops.main import main
from opslib.osm.charm import CharmedOsmBase, RelationsMissing
from opslib.osm.interfaces.http import HttpServer
from opslib.osm.interfaces.kafka import KafkaClient
from opslib.osm.interfaces.keystone import KeystoneClient
from opslib.osm.interfaces.mongo import MongoClient
from opslib.osm.interfaces.prometheus import PrometheusClient
from opslib.osm.pod import (
    ContainerV3Builder,
    IngressResourceV3Builder,
    PodSpecV3Builder,
)
from opslib.osm.validator import ModelValidator, validator


logger = logging.getLogger(__name__)

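# TCP port the NBI service listens on; reused below for the container port,
# the readiness/liveness probes, the nbi relation data and the ingress rule.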
PORT = 9999


class ConfigModel(ModelValidator):
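    """Charm configuration options, checked by the validators below.

    Illustrative example of values that would pass validation (hostname,
    CIDR and size are hypothetical):

        auth_backend: "keystone"
        log_level: "DEBUG"
        max_file_size: 25                  # megabytes; 0 removes the upload limit
        site_url: "https://nbi.example.com"
        ingress_whitelist_source_range: "10.45.0.0/16"
        cluster_issuer: "letsencrypt-prod"
    """
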
    enable_test: bool
    auth_backend: str
    database_commonkey: str
    log_level: str
    max_file_size: int
    site_url: Optional[str]
    cluster_issuer: Optional[str]
    ingress_whitelist_source_range: Optional[str]
    tls_secret_name: Optional[str]
    mongodb_uri: Optional[str]

    @validator("auth_backend")
    def validate_auth_backend(cls, v):
        if v not in {"internal", "keystone"}:
            raise ValueError("value must be 'internal' or 'keystone'")
        return v

    @validator("log_level")
    def validate_log_level(cls, v):
        if v not in {"INFO", "DEBUG"}:
            raise ValueError("value must be INFO or DEBUG")
        return v

    @validator("max_file_size")
    def validate_max_file_size(cls, v):
        if v < 0:
            raise ValueError("value must be equal or greater than 0")
        return v

    @validator("site_url")
    def validate_site_url(cls, v):
        if v:
            parsed = urlparse(v)
            if not parsed.scheme.startswith("http"):
                raise ValueError("value must start with http")
        return v

    @validator("ingress_whitelist_source_range")
    def validate_ingress_whitelist_source_range(cls, v):
        if v:
            ip_network(v)
        return v

    @validator("mongodb_uri")
    def validate_mongodb_uri(cls, v):
        if v and not v.startswith("mongodb://"):
            raise ValueError("mongodb_uri is not properly formed")
        return v


class NbiCharm(CharmedOsmBase):
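    """Kubernetes charm for the OSM NBI (North Bound Interface) service."""
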
    def __init__(self, *args) -> None:
        super().__init__(*args, oci_image="image")

        self.kafka_client = KafkaClient(self, "kafka")
        self.framework.observe(self.on["kafka"].relation_changed, self.configure_pod)
        self.framework.observe(self.on["kafka"].relation_broken, self.configure_pod)

        self.mongodb_client = MongoClient(self, "mongodb")
        self.framework.observe(self.on["mongodb"].relation_changed, self.configure_pod)
        self.framework.observe(self.on["mongodb"].relation_broken, self.configure_pod)

        self.prometheus_client = PrometheusClient(self, "prometheus")
        self.framework.observe(
            self.on["prometheus"].relation_changed, self.configure_pod
        )
        self.framework.observe(
            self.on["prometheus"].relation_broken, self.configure_pod
        )

        self.keystone_client = KeystoneClient(self, "keystone")
        self.framework.observe(self.on["keystone"].relation_changed, self.configure_pod)
        self.framework.observe(self.on["keystone"].relation_broken, self.configure_pod)

        self.http_server = HttpServer(self, "nbi")
        self.framework.observe(self.on["nbi"].relation_joined, self._publish_nbi_info)

    def _publish_nbi_info(self, event):
        """Publish the NBI service name and port over the nbi relation.

        Args:
            event (EventBase): nbi relation joined event.
        """
        if self.unit.is_leader():
            self.http_server.publish_info(self.app.name, PORT)

    def _check_missing_dependencies(self, config: ConfigModel):
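        """Raise RelationsMissing if any required relation has not yet provided its data."""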
        missing_relations = []

        if self.kafka_client.is_missing_data_in_unit():
            missing_relations.append("kafka")
        if not config.mongodb_uri and self.mongodb_client.is_missing_data_in_unit():
            missing_relations.append("mongodb")
        if self.prometheus_client.is_missing_data_in_app():
            missing_relations.append("prometheus")
        if config.auth_backend == "keystone":
            if self.keystone_client.is_missing_data_in_app():
                missing_relations.append("keystone")

        if missing_relations:
            raise RelationsMissing(missing_relations)

    def build_pod_spec(self, image_info):
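        """Build the pod spec (v3) for the NBI service from charm config and relation data."""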
        # Validate config
        config = ConfigModel(**dict(self.config))

        if config.mongodb_uri and not self.mongodb_client.is_missing_data_in_unit():
            raise Exception("Mongodb data cannot be provided via config and relation")

        # Check relations
        self._check_missing_dependencies(config)

        # Create Builder for the PodSpec
        pod_spec_builder = PodSpecV3Builder()

        # Init container: keep retrying until the Kafka host/port is reachable,
        # so the NBI container only starts once Kafka is available
        pod_spec_builder.add_init_container(
            {
                "name": "init-check",
                "image": "alpine:latest",
                "command": [
                    "sh",
                    "-c",
                    f"until (nc -zvw1 {self.kafka_client.host} {self.kafka_client.port} ); do sleep 3; done; exit 0",
                ],
            }
        )

        # Build Container
        container_builder = ContainerV3Builder(self.app.name, image_info)
        container_builder.add_port(name=self.app.name, port=PORT)
        container_builder.add_tcpsocket_readiness_probe(
            PORT,
            initial_delay_seconds=5,
            timeout_seconds=5,
        )
        container_builder.add_tcpsocket_liveness_probe(
            PORT,
            initial_delay_seconds=45,
            timeout_seconds=10,
        )
        container_builder.add_envs(
            {
                # General configuration
                "ALLOW_ANONYMOUS_LOGIN": "yes",
                "OSMNBI_SERVER_ENABLE_TEST": config.enable_test,
                "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
                # Kafka configuration
                "OSMNBI_MESSAGE_HOST": self.kafka_client.host,
                "OSMNBI_MESSAGE_DRIVER": "kafka",
                "OSMNBI_MESSAGE_PORT": self.kafka_client.port,
                # Database configuration
                "OSMNBI_DATABASE_DRIVER": "mongo",
                "OSMNBI_DATABASE_URI": config.mongodb_uri
                or self.mongodb_client.connection_string,
                "OSMNBI_DATABASE_COMMONKEY": config.database_commonkey,
                # Storage configuration
                "OSMNBI_STORAGE_DRIVER": "mongo",
                "OSMNBI_STORAGE_PATH": "/app/storage",
                "OSMNBI_STORAGE_COLLECTION": "files",
                "OSMNBI_STORAGE_URI": config.mongodb_uri
                or self.mongodb_client.connection_string,
                # Prometheus configuration
                "OSMNBI_PROMETHEUS_HOST": self.prometheus_client.hostname,
                "OSMNBI_PROMETHEUS_PORT": self.prometheus_client.port,
                # Log configuration
                "OSMNBI_LOG_LEVEL": config.log_level,
            }
        )
        if config.auth_backend == "internal":
            container_builder.add_env("OSMNBI_AUTHENTICATION_BACKEND", "internal")
        elif config.auth_backend == "keystone":
            container_builder.add_envs(
                {
                    "OSMNBI_AUTHENTICATION_BACKEND": "keystone",
                    "OSMNBI_AUTHENTICATION_AUTH_URL": self.keystone_client.host,
                    "OSMNBI_AUTHENTICATION_AUTH_PORT": self.keystone_client.port,
                    "OSMNBI_AUTHENTICATION_USER_DOMAIN_NAME": self.keystone_client.user_domain_name,
                    "OSMNBI_AUTHENTICATION_PROJECT_DOMAIN_NAME": self.keystone_client.project_domain_name,
                    "OSMNBI_AUTHENTICATION_SERVICE_USERNAME": self.keystone_client.username,
                    "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD": self.keystone_client.password,
                    "OSMNBI_AUTHENTICATION_SERVICE_PROJECT": self.keystone_client.service,
                }
            )
        container = container_builder.build()

        # Add container to pod spec
        pod_spec_builder.add_container(container)

        # Add ingress resources to pod spec if site_url exists
        if config.site_url:
            parsed = urlparse(config.site_url)
            annotations = {
                "nginx.ingress.kubernetes.io/proxy-body-size": (
                    f"{config.max_file_size}m"
                    if config.max_file_size > 0
                    else "0"  # "0" disables the nginx request-body size limit
                ),
                "nginx.ingress.kubernetes.io/backend-protocol": "HTTPS",
                "kubernetes.io/ingress.class": "public",
            }
            ingress_resource_builder = IngressResourceV3Builder(
                f"{self.app.name}-ingress", annotations
            )

            if config.ingress_whitelist_source_range:
                annotations[
                    "nginx.ingress.kubernetes.io/whitelist-source-range"
                ] = config.ingress_whitelist_source_range

            if config.cluster_issuer:
                annotations["cert-manager.io/cluster-issuer"] = config.cluster_issuer

            if parsed.scheme == "https":
                ingress_resource_builder.add_tls(
                    [parsed.hostname], config.tls_secret_name
                )
            else:
                annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"

            ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT)
            ingress_resource = ingress_resource_builder.build()
            pod_spec_builder.add_ingress_resource(ingress_resource)
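            # Illustrative result (hypothetical values): with
            # site_url="https://nbi.example.com" and max_file_size=25, the
            # ingress carries proxy-body-size "25m", backend-protocol "HTTPS",
            # ingress.class "public", and a TLS section for host
            # "nbi.example.com" using tls_secret_name if it is set.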

        pod_spec = pod_spec_builder.build()
        logger.debug(pod_spec)

        return pod_spec


if __name__ == "__main__":
    main(NbiCharm)
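

# Illustrative deployment of this charm (charm path, image reference and the
# application names on the other side of each relation are examples only,
# not pinned by this file):
#
#   juju deploy ./nbi.charm --resource image=opensourcemano/nbi:latest
#   juju config nbi site_url=https://nbi.example.com max_file_size=25
#   juju add-relation nbi:kafka kafka-k8s
#   juju add-relation nbi:mongodb mongodb-k8s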