Bug 1962 fixed: removed the variable cluster_uuid from init_env method
osm/N2VC.git: n2vc/k8s_helm3_conn.py
##
# Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
# This file is part of OSM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: nfvlabs@tid.es
##
import os
import yaml

from n2vc.k8s_helm_base_conn import K8sHelmBaseConnector
from n2vc.exceptions import K8sException


class K8sHelm3Connector(K8sHelmBaseConnector):

    """
    ####################################################################################
    ################################### P U B L I C ####################################
    ####################################################################################
    """

    def __init__(
        self,
        fs: object,
        db: object,
        kubectl_command: str = "/usr/bin/kubectl",
        helm_command: str = "/usr/bin/helm3",
        log: object = None,
        on_update_db=None,
    ):
        """
        Initializes helm connector for helm v3

        :param fs: file system for kubernetes and helm configuration
        :param db: database object to write current operation status
        :param kubectl_command: path to kubectl executable
        :param helm_command: path to helm executable
        :param log: logger
        :param on_update_db: callback called when k8s connector updates database
        """

        # parent class
        K8sHelmBaseConnector.__init__(
            self,
            db=db,
            log=log,
            fs=fs,
            kubectl_command=kubectl_command,
            helm_command=helm_command,
            on_update_db=on_update_db,
        )

        self.log.info("K8S Helm3 connector initialized")

    async def install(
        self,
        cluster_uuid: str,
        kdu_model: str,
        kdu_instance: str,
        atomic: bool = True,
        timeout: float = 300,
        params: dict = None,
        db_dict: dict = None,
        kdu_name: str = None,
        namespace: str = None,
        **kwargs,
    ):
83 """Install a helm chart
84
85 :param cluster_uuid str: The UUID of the cluster to install to
86 :param kdu_model str: The name or path of a bundle to install
87 :param kdu_instance: Kdu instance name
88 :param atomic bool: If set, waits until the model is active and resets
89 the cluster on failure.
90 :param timeout int: The time, in seconds, to wait for the install
91 to finish
92 :param params dict: Key-value pairs of instantiation parameters
93 :param kdu_name: Name of the KDU instance to be installed
94 :param namespace: K8s namespace to use for the KDU instance
95
96 :param kwargs: Additional parameters (None yet)
97
98 :return: True if successful
99 """

        self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_uuid))

        # sync local dir
        self.fs.sync(from_path=cluster_uuid)

        # init env, paths
        paths, env = self._init_paths_env(
            cluster_name=cluster_uuid, create_if_not_exist=True
        )

        # for helm3, if the namespace does not exist it must be created
        if namespace and namespace != "kube-system":
            if not await self._namespace_exists(cluster_uuid, namespace):
                try:
                    await self._create_namespace(cluster_uuid, namespace)
                except Exception as e:
                    if not await self._namespace_exists(cluster_uuid, namespace):
                        err_msg = (
                            "namespace {} does not exist in cluster_id {}, "
                            "error message: {}".format(namespace, cluster_uuid, e)
                        )
                        self.log.error(err_msg)
                        raise K8sException(err_msg)

        await self._install_impl(
            cluster_uuid,
            kdu_model,
            paths,
            env,
            kdu_instance,
            atomic=atomic,
            timeout=timeout,
            params=params,
            db_dict=db_dict,
            kdu_name=kdu_name,
            namespace=namespace,
        )

        # sync fs
        self.fs.reverse_sync(from_path=cluster_uuid)

        self.log.debug("Returning kdu_instance {}".format(kdu_instance))
        return True

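    # Illustrative usage sketch for install() (not part of this module; the
    # objects and identifiers below are hypothetical placeholders):
    #
    #   conn = K8sHelm3Connector(fs=fs, db=db, log=logger)
    #   await conn.install(
    #       cluster_uuid="0a1b2c3d-example-cluster-id",
    #       kdu_model="stable/openldap",
    #       kdu_instance="openldap-0012345",
    #       namespace="example-ns",
    #       params={"replicaCount": 2},
    #   )
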
    async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:

        self.log.debug(
            "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url)
        )

        return await self._exec_inspect_command(
            inspect_command="all", kdu_model=kdu_model, repo_url=repo_url
        )

    """
    ####################################################################################
    ################################### P R I V A T E ##################################
    ####################################################################################
    """

    def _init_paths_env(self, cluster_name: str, create_if_not_exist: bool = True):
        """
        Creates and returns the base cluster and kube dirs.
        It also creates the helm3 dirs according to the new directory specification;
        the paths are returned together with the environment variables that must be
        provided to execute commands.

        The Helm 3 directory specification uses XDG base directories:
        - Cache: $XDG_CACHE_HOME, for example ${HOME}/.cache/helm/
        - Configuration: $XDG_CONFIG_HOME, for example ${HOME}/.config/helm/
        - Data: $XDG_DATA_HOME, for example ${HOME}/.local/share/helm

        The environment variables assigned to these paths are:
        (The documentation names them $HELM_PATH_CACHE, $HELM_PATH_CONFIG and
        $HELM_PATH_DATA, but `helm env` shows different variable names)
        - Cache: $HELM_CACHE_HOME
        - Config: $HELM_CONFIG_HOME
        - Data: $HELM_DATA_HOME
        - helm kubeconfig: $KUBECONFIG

        :param cluster_name: name of the cluster
        :return: Dictionary with config_paths and dictionary with helm environment variables
        """

        base = self.fs.path
        if base.endswith("/") or base.endswith("\\"):
            base = base[:-1]

        # base dir for cluster
        cluster_dir = base + "/" + cluster_name

        # kube dir
        kube_dir = cluster_dir + "/" + ".kube"
        if create_if_not_exist and not os.path.exists(kube_dir):
            self.log.debug("Creating dir {}".format(kube_dir))
            os.makedirs(kube_dir)

        helm_path_cache = cluster_dir + "/.cache/helm"
        if create_if_not_exist and not os.path.exists(helm_path_cache):
            self.log.debug("Creating dir {}".format(helm_path_cache))
            os.makedirs(helm_path_cache)

        helm_path_config = cluster_dir + "/.config/helm"
        if create_if_not_exist and not os.path.exists(helm_path_config):
            self.log.debug("Creating dir {}".format(helm_path_config))
            os.makedirs(helm_path_config)

        helm_path_data = cluster_dir + "/.local/share/helm"
        if create_if_not_exist and not os.path.exists(helm_path_data):
            self.log.debug("Creating dir {}".format(helm_path_data))
            os.makedirs(helm_path_data)

        config_filename = kube_dir + "/config"

        # 2 - Prepare dictionary with paths
        paths = {
            "kube_dir": kube_dir,
            "kube_config": config_filename,
            "cluster_dir": cluster_dir,
        }

        # 3 - Prepare environment variables
        env = {
            "HELM_CACHE_HOME": helm_path_cache,
            "HELM_CONFIG_HOME": helm_path_config,
            "HELM_DATA_HOME": helm_path_data,
            "KUBECONFIG": config_filename,
        }

        for file_name, file in paths.items():
            if "dir" in file_name and not os.path.exists(file):
                err_msg = "{} dir does not exist".format(file)
                self.log.error(err_msg)
                raise K8sException(err_msg)

        return paths, env

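    # For reference, assuming a hypothetical fs.path of "/app/storage" and a
    # cluster_name of "myk8s", _init_paths_env() would return values like:
    #
    #   paths = {
    #       "kube_dir": "/app/storage/myk8s/.kube",
    #       "kube_config": "/app/storage/myk8s/.kube/config",
    #       "cluster_dir": "/app/storage/myk8s",
    #   }
    #   env = {
    #       "HELM_CACHE_HOME": "/app/storage/myk8s/.cache/helm",
    #       "HELM_CONFIG_HOME": "/app/storage/myk8s/.config/helm",
    #       "HELM_DATA_HOME": "/app/storage/myk8s/.local/share/helm",
    #       "KUBECONFIG": "/app/storage/myk8s/.kube/config",
    #   }
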
    async def _namespace_exists(self, cluster_id, namespace) -> bool:
        self.log.debug(
            "checking if namespace {} exists cluster_id {}".format(
                namespace, cluster_id
            )
        )
        namespaces = await self._get_namespaces(cluster_id)
        return namespace in namespaces if namespaces else False

    async def _get_namespaces(self, cluster_id: str):

        self.log.debug("get namespaces cluster_id {}".format(cluster_id))

        # init config, env
        paths, env = self._init_paths_env(
            cluster_name=cluster_id, create_if_not_exist=True
        )

        command = "{} --kubeconfig={} get namespaces -o=yaml".format(
            self.kubectl_command, paths["kube_config"]
        )
        output, _rc = await self._local_async_exec(
            command=command, raise_exception_on_error=True, env=env
        )

        data = yaml.load(output, Loader=yaml.SafeLoader)
        namespaces = [item["metadata"]["name"] for item in data["items"]]
        self.log.debug(f"namespaces {namespaces}")

        return namespaces

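    # The `kubectl get namespaces -o=yaml` output parsed above has the form
    # shown below (abridged, illustrative):
    #
    #   apiVersion: v1
    #   items:
    #   - metadata:
    #       name: default
    #   - metadata:
    #       name: kube-system
    #
    # which would yield namespaces == ["default", "kube-system"].
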
    async def _create_namespace(self, cluster_id: str, namespace: str):

        self.log.debug(f"create namespace: {namespace} for cluster_id: {cluster_id}")

        # init config, env
        paths, env = self._init_paths_env(
            cluster_name=cluster_id, create_if_not_exist=True
        )

        command = "{} --kubeconfig={} create namespace {}".format(
            self.kubectl_command, paths["kube_config"], namespace
        )
        _, _rc = await self._local_async_exec(
            command=command, raise_exception_on_error=True, env=env
        )
        self.log.debug(f"namespace {namespace} created")

        return _rc

    async def _get_services(
        self, cluster_id: str, kdu_instance: str, namespace: str, kubeconfig: str
    ):

        # init config, env
        paths, env = self._init_paths_env(
            cluster_name=cluster_id, create_if_not_exist=True
        )

        command1 = "env KUBECONFIG={} {} get manifest {} --namespace={}".format(
            kubeconfig, self._helm_command, kdu_instance, namespace
        )
        command2 = "{} get --namespace={} -f -".format(self.kubectl_command, namespace)
        output, _rc = await self._local_async_exec_pipe(
            command1, command2, env=env, raise_exception_on_error=True
        )
        services = self._parse_services(output)

        return services

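    # The two commands piped above look like the following for hypothetical
    # values (the release manifest is fed to kubectl via stdin):
    #
    #   env KUBECONFIG=/app/storage/myk8s/.kube/config /usr/bin/helm3 get manifest openldap-0012345 --namespace=example-ns
    #   | /usr/bin/kubectl get --namespace=example-ns -f -
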
    async def _cluster_init(self, cluster_id, namespace, paths, env):
        """
        Implements the helm version dependent cluster initialization:
        For helm3 it creates the namespace if it does not already exist
        """
        if namespace != "kube-system":
            namespaces = await self._get_namespaces(cluster_id)
            if namespace not in namespaces:
                await self._create_namespace(cluster_id, namespace)

        repo_list = await self.repo_list(cluster_id)
        stable_repo = [repo for repo in repo_list if repo["name"] == "stable"]
        if not stable_repo and self._stable_repo_url:
            await self.repo_add(cluster_id, "stable", self._stable_repo_url)

        # Returns False as no software needs to be uninstalled
        return False

    async def _uninstall_sw(self, cluster_id: str, namespace: str):
        # nothing to do to uninstall sw
        pass

    async def _instances_list(self, cluster_id: str):

        # init paths, env
        paths, env = self._init_paths_env(
            cluster_name=cluster_id, create_if_not_exist=True
        )

        command = "{} list --all-namespaces --output yaml".format(self._helm_command)
        output, _rc = await self._local_async_exec(
            command=command, raise_exception_on_error=True, env=env
        )

        if output and len(output) > 0:
            self.log.debug("instances list output: {}".format(output))
            return yaml.load(output, Loader=yaml.SafeLoader)
        else:
            return []

    def _get_inspect_command(
        self, inspect_command: str, kdu_model: str, repo_str: str, version: str
    ):
        inspect_command = "{} show {} {}{} {}".format(
            self._helm_command, inspect_command, kdu_model, repo_str, version
        )
        return inspect_command

    def _get_get_command(
        self, get_command: str, kdu_instance: str, namespace: str, kubeconfig: str
    ):
        get_command = (
            "env KUBECONFIG={} {} get {} {} --namespace={} --output yaml".format(
                kubeconfig, self._helm_command, get_command, kdu_instance, namespace
            )
        )
        return get_command

    async def _status_kdu(
        self,
        cluster_id: str,
        kdu_instance: str,
        namespace: str = None,
        show_error_log: bool = False,
        return_text: bool = False,
    ):

        self.log.debug(
            "status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace)
        )

        if not namespace:
            namespace = "kube-system"

        # init config, env
        paths, env = self._init_paths_env(
            cluster_name=cluster_id, create_if_not_exist=True
        )
        command = "env KUBECONFIG={} {} status {} --namespace={} --output yaml".format(
            paths["kube_config"], self._helm_command, kdu_instance, namespace
        )

        output, rc = await self._local_async_exec(
            command=command,
            raise_exception_on_error=True,
            show_error_log=show_error_log,
            env=env,
        )

        if return_text:
            return str(output)

        if rc != 0:
            return None

        data = yaml.load(output, Loader=yaml.SafeLoader)

        # remove the fields 'notes' and 'manifest'
        try:
            del data.get("info")["notes"]
            del data["manifest"]
        except KeyError:
            pass

        # 'resources' cannot be parsed, as it is currently not included in the helm3 status output
        return data

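    # For reference, `helm3 status <release> --output yaml` returns a structure
    # similar to the following (abridged, values illustrative), so the returned
    # dict keeps 'info' without 'notes' and drops 'manifest':
    #
    #   name: openldap-0012345
    #   namespace: example-ns
    #   info:
    #     status: deployed
    #     notes: |
    #       ...
    #   manifest: |
    #     ...
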
    def _get_install_command(
        self,
        kdu_model: str,
        kdu_instance: str,
        namespace: str,
        params_str: str,
        version: str,
        atomic: bool,
        timeout: float,
        kubeconfig: str,
    ) -> str:

        timeout_str = ""
        if timeout:
            timeout_str = "--timeout {}s".format(timeout)

        # atomic
        atomic_str = ""
        if atomic:
            atomic_str = "--atomic"
        # namespace
        namespace_str = ""
        if namespace:
            namespace_str = "--namespace {}".format(namespace)

        # version
        version_str = ""
        if version:
            version_str = "--version {}".format(version)

        command = (
            "env KUBECONFIG={kubeconfig} {helm} install {name} {atomic} --output yaml "
            "{params} {timeout} {ns} {model} {ver}".format(
                kubeconfig=kubeconfig,
                helm=self._helm_command,
                name=kdu_instance,
                atomic=atomic_str,
                params=params_str,
                timeout=timeout_str,
                ns=namespace_str,
                model=kdu_model,
                ver=version_str,
            )
        )
        return command

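    # Example of the install command built above for hypothetical inputs,
    # assuming params_str is "--set replicaCount=2":
    #
    #   env KUBECONFIG=/app/storage/myk8s/.kube/config /usr/bin/helm3 install
    #   openldap-0012345 --atomic --output yaml --set replicaCount=2
    #   --timeout 300s --namespace example-ns stable/openldap --version 1.2.3
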
    def _get_upgrade_scale_command(
        self,
        kdu_model: str,
        kdu_instance: str,
        namespace: str,
        scale: int,
        version: str,
        atomic: bool,
        replica_str: str,
        timeout: float,
        resource_name: str,
        kubeconfig: str,
    ) -> str:

        timeout_str = ""
        if timeout:
            timeout_str = "--timeout {}s".format(timeout)

        # atomic
        atomic_str = ""
        if atomic:
            atomic_str = "--atomic"

        # version
        version_str = ""
        if version:
            version_str = "--version {}".format(version)

        # namespace
        namespace_str = ""
        if namespace:
            namespace_str = "--namespace {}".format(namespace)

        # scale
        if resource_name:
            scale_dict = {"{}.{}".format(resource_name, replica_str): scale}
        else:
            scale_dict = {replica_str: scale}

        scale_str = self._params_to_set_option(scale_dict)

        command = (
            "env KUBECONFIG={kubeconfig} {helm} upgrade {name} {model} {namespace} {atomic} --output yaml {scale} "
            "{timeout} {ver}"
        ).format(
            helm=self._helm_command,
            name=kdu_instance,
            namespace=namespace_str,
            atomic=atomic_str,
            scale=scale_str,
            timeout=timeout_str,
            model=kdu_model,
            ver=version_str,
            kubeconfig=kubeconfig,
        )
        return command

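    # Example of the resulting upgrade/scale command for hypothetical inputs,
    # assuming _params_to_set_option() in the base class renders the scale dict
    # as "--set replicaCount=3" (the exact rendering is delegated to it):
    #
    #   env KUBECONFIG=/app/storage/myk8s/.kube/config /usr/bin/helm3 upgrade
    #   openldap-0012345 stable/openldap --namespace example-ns --atomic
    #   --output yaml --set replicaCount=3 --timeout 1800s --version 1.2.3
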
    def _get_upgrade_command(
        self,
        kdu_model: str,
        kdu_instance: str,
        namespace: str,
        params_str: str,
        version: str,
        atomic: bool,
        timeout: float,
        kubeconfig: str,
    ) -> str:

        timeout_str = ""
        if timeout:
            timeout_str = "--timeout {}s".format(timeout)

        # atomic
        atomic_str = ""
        if atomic:
            atomic_str = "--atomic"

        # version
        version_str = ""
        if version:
            version_str = "--version {}".format(version)

        # namespace
        namespace_str = ""
        if namespace:
            namespace_str = "--namespace {}".format(namespace)

        command = (
            "env KUBECONFIG={kubeconfig} {helm} upgrade {name} {model} {namespace} {atomic} "
            "--output yaml {params} {timeout} {ver}"
        ).format(
            kubeconfig=kubeconfig,
            helm=self._helm_command,
            name=kdu_instance,
            namespace=namespace_str,
            atomic=atomic_str,
            params=params_str,
            timeout=timeout_str,
            model=kdu_model,
            ver=version_str,
        )
        return command

    def _get_rollback_command(
        self, kdu_instance: str, namespace: str, revision: float, kubeconfig: str
    ) -> str:
        return "env KUBECONFIG={} {} rollback {} {} --namespace={} --wait".format(
            kubeconfig, self._helm_command, kdu_instance, revision, namespace
        )

    def _get_uninstall_command(
        self, kdu_instance: str, namespace: str, kubeconfig: str
    ) -> str:

        return "env KUBECONFIG={} {} uninstall {} --namespace={}".format(
            kubeconfig, self._helm_command, kdu_instance, namespace
        )

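    # For illustration, with hypothetical values the rollback and uninstall
    # commands rendered above look like:
    #
    #   env KUBECONFIG=/app/storage/myk8s/.kube/config /usr/bin/helm3 rollback openldap-0012345 2 --namespace=example-ns --wait
    #   env KUBECONFIG=/app/storage/myk8s/.kube/config /usr/bin/helm3 uninstall openldap-0012345 --namespace=example-ns
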
    def _get_helm_chart_repos_ids(self, cluster_uuid) -> list:
        repo_ids = []
        cluster_filter = {"_admin.helm-chart-v3.id": cluster_uuid}
        cluster = self.db.get_one("k8sclusters", cluster_filter)
        if cluster:
            repo_ids = cluster.get("_admin").get("helm_chart_repos") or []
            return repo_ids
        else:
            raise K8sException(
                "k8s cluster with helm-id: {} not found".format(cluster_uuid)
            )
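
    # The k8sclusters document read above is expected to look roughly like this
    # (abridged, field values hypothetical):
    #
    #   {
    #       "_admin": {
    #           "helm-chart-v3": {"id": "<cluster_uuid>"},
    #           "helm_chart_repos": ["<repo-id-1>", "<repo-id-2>"],
    #       },
    #   }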