Fix bug 1400: Change repo stable for helm2
[osm/N2VC.git] / n2vc / k8s_helm3_conn.py
1 ##
2 # Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
3 # This file is part of OSM
4 # All Rights Reserved.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
15 # implied.
16 # See the License for the specific language governing permissions and
17 # limitations under the License.
18 #
19 # For those usages not covered by the Apache License, Version 2.0 please
20 # contact with: nfvlabs@tid.es
21 ##
22 import os
23 import yaml
24
25 from n2vc.k8s_helm_base_conn import K8sHelmBaseConnector
26 from n2vc.exceptions import K8sException
27
28
29 class K8sHelm3Connector(K8sHelmBaseConnector):
30
31 """
32 ####################################################################################
33 ################################### P U B L I C ####################################
34 ####################################################################################
35 """
36
37 def __init__(
38 self,
39 fs: object,
40 db: object,
41 kubectl_command: str = "/usr/bin/kubectl",
42 helm_command: str = "/usr/bin/helm3",
43 log: object = None,
44 on_update_db=None,
45 vca_config: dict = None,
46 ):
47 """
48 Initializes helm connector for helm v3
49
50 :param fs: file system for kubernetes and helm configuration
51 :param db: database object to write current operation status
52 :param kubectl_command: path to kubectl executable
53 :param helm_command: path to helm executable
54 :param log: logger
55 :param on_update_db: callback called when k8s connector updates database
56 """
57
58 # parent class
59 K8sHelmBaseConnector.__init__(self,
60 db=db,
61 log=log,
62 fs=fs,
63 kubectl_command=kubectl_command,
64 helm_command=helm_command,
65 on_update_db=on_update_db,
66 vca_config=vca_config)
67
68 self.log.info("K8S Helm3 connector initialized")
69
70 async def install(
71 self,
72 cluster_uuid: str,
73 kdu_model: str,
74 atomic: bool = True,
75 timeout: float = 300,
76 params: dict = None,
77 db_dict: dict = None,
78 kdu_name: str = None,
79 namespace: str = None,
80 ):
81 _, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
82 self.log.debug("installing {} in cluster {}".format(kdu_model, cluster_id))
83
84 # sync local dir
85 self.fs.sync(from_path=cluster_id)
86
87 # init env, paths
88 paths, env = self._init_paths_env(
89 cluster_name=cluster_id, create_if_not_exist=True
90 )
91
92 # for helm3 if namespace does not exist must create it
93 if namespace and namespace != "kube-system":
94 namespaces = await self._get_namespaces(cluster_id)
95 if namespace not in namespaces:
96 await self._create_namespace(cluster_id, namespace)
97
98 kdu_instance = await self._install_impl(cluster_id,
99 kdu_model,
100 paths,
101 env,
102 atomic=atomic,
103 timeout=timeout,
104 params=params,
105 db_dict=db_dict,
106 kdu_name=kdu_name,
107 namespace=namespace)
108
109 # sync fs
110 self.fs.reverse_sync(from_path=cluster_id)
111
112 self.log.debug("Returning kdu_instance {}".format(kdu_instance))
113 return kdu_instance
114
115 async def inspect_kdu(self, kdu_model: str, repo_url: str = None) -> str:
116
117 self.log.debug(
118 "inspect kdu_model {} from (optional) repo: {}".format(kdu_model, repo_url)
119 )
120
121 return await self._exec_inspect_comand(
122 inspect_command="all", kdu_model=kdu_model, repo_url=repo_url
123 )
124
125 """
126 ####################################################################################
127 ################################### P R I V A T E ##################################
128 ####################################################################################
129 """
130
131 def _init_paths_env(self, cluster_name: str, create_if_not_exist: bool = True):
132 """
133 Creates and returns base cluster and kube dirs and returns them.
134 Also created helm3 dirs according to new directory specification, paths are
135 returned and also environment variables that must be provided to execute commands
136
137 Helm 3 directory specification uses XDG categories for variable support:
138 - Cache: $XDG_CACHE_HOME, for example, ${HOME}/.cache/helm/
139 - Configuration: $XDG_CONFIG_HOME, for example, ${HOME}/.config/helm/
140 - Data: $XDG_DATA_HOME, for example ${HOME}/.local/share/helm
141
142 The variables assigned for this paths are:
143 (In the documentation the variables names are $HELM_PATH_CACHE, $HELM_PATH_CONFIG,
144 $HELM_PATH_DATA but looking and helm env the variable names are different)
145 - Cache: $HELM_CACHE_HOME
146 - Config: $HELM_CONFIG_HOME
147 - Data: $HELM_DATA_HOME
148 - helm kubeconfig: $KUBECONFIG
149
150 :param cluster_name: cluster_name
151 :return: Dictionary with config_paths and dictionary with helm environment variables
152 """
153
154 base = self.fs.path
155 if base.endswith("/") or base.endswith("\\"):
156 base = base[:-1]
157
158 # base dir for cluster
159 cluster_dir = base + "/" + cluster_name
160
161 # kube dir
162 kube_dir = cluster_dir + "/" + ".kube"
163 if create_if_not_exist and not os.path.exists(kube_dir):
164 self.log.debug("Creating dir {}".format(kube_dir))
165 os.makedirs(kube_dir)
166
167 helm_path_cache = cluster_dir + "/.cache/helm"
168 if create_if_not_exist and not os.path.exists(helm_path_cache):
169 self.log.debug("Creating dir {}".format(helm_path_cache))
170 os.makedirs(helm_path_cache)
171
172 helm_path_config = cluster_dir + "/.config/helm"
173 if create_if_not_exist and not os.path.exists(helm_path_config):
174 self.log.debug("Creating dir {}".format(helm_path_config))
175 os.makedirs(helm_path_config)
176
177 helm_path_data = cluster_dir + "/.local/share/helm"
178 if create_if_not_exist and not os.path.exists(helm_path_data):
179 self.log.debug("Creating dir {}".format(helm_path_data))
180 os.makedirs(helm_path_data)
181
182 config_filename = kube_dir + "/config"
183
184 # 2 - Prepare dictionary with paths
185 paths = {
186 "kube_dir": kube_dir,
187 "kube_config": config_filename,
188 "cluster_dir": cluster_dir
189 }
190
191 # 3 - Prepare environment variables
192 env = {
193 "HELM_CACHE_HOME": helm_path_cache,
194 "HELM_CONFIG_HOME": helm_path_config,
195 "HELM_DATA_HOME": helm_path_data,
196 "KUBECONFIG": config_filename
197 }
198
199 for file_name, file in paths.items():
200 if "dir" in file_name and not os.path.exists(file):
201 err_msg = "{} dir does not exist".format(file)
202 self.log.error(err_msg)
203 raise K8sException(err_msg)
204
205 return paths, env
206
207 async def _get_namespaces(self,
208 cluster_id: str):
209
210 self.log.debug("get namespaces cluster_id {}".format(cluster_id))
211
212 # init config, env
213 paths, env = self._init_paths_env(
214 cluster_name=cluster_id, create_if_not_exist=True
215 )
216
217 command = "{} --kubeconfig={} get namespaces -o=yaml".format(
218 self.kubectl_command, paths["kube_config"]
219 )
220 output, _rc = await self._local_async_exec(
221 command=command, raise_exception_on_error=True, env=env
222 )
223
224 data = yaml.load(output, Loader=yaml.SafeLoader)
225 namespaces = [item["metadata"]["name"] for item in data["items"]]
226 self.log.debug(f"namespaces {namespaces}")
227
228 return namespaces
229
230 async def _create_namespace(self,
231 cluster_id: str,
232 namespace: str):
233
234 self.log.debug(f"create namespace: {cluster_id} for cluster_id: {namespace}")
235
236 # init config, env
237 paths, env = self._init_paths_env(
238 cluster_name=cluster_id, create_if_not_exist=True
239 )
240
241 command = "{} --kubeconfig={} create namespace {}".format(
242 self.kubectl_command, paths["kube_config"], namespace
243 )
244 _, _rc = await self._local_async_exec(
245 command=command, raise_exception_on_error=True, env=env
246 )
247 self.log.debug(f"namespace {namespace} created")
248
249 return _rc
250
251 async def _get_services(self, cluster_id: str, kdu_instance: str, namespace: str):
252
253 # init config, env
254 paths, env = self._init_paths_env(
255 cluster_name=cluster_id, create_if_not_exist=True
256 )
257
258 command1 = "{} get manifest {} --namespace={}".format(
259 self._helm_command, kdu_instance, namespace
260 )
261 command2 = "{} get --namespace={} -f -".format(
262 self.kubectl_command, namespace
263 )
264 output, _rc = await self._local_async_exec_pipe(
265 command1, command2, env=env, raise_exception_on_error=True
266 )
267 services = self._parse_services(output)
268
269 return services
270
271 async def _cluster_init(self, cluster_id, namespace, paths, env):
272 """
273 Implements the helm version dependent cluster initialization:
274 For helm3 it creates the namespace if it is not created
275 """
276 if namespace != "kube-system":
277 namespaces = await self._get_namespaces(cluster_id)
278 if namespace not in namespaces:
279 await self._create_namespace(cluster_id, namespace)
280
281 # If default repo is not included add
282 cluster_uuid = "{}:{}".format(namespace, cluster_id)
283 repo_list = await self.repo_list(cluster_uuid)
284 for repo in repo_list:
285 self.log.debug("repo")
286 if repo["name"] == "stable":
287 self.log.debug("Default repo already present")
288 break
289 else:
290 await self.repo_add(cluster_uuid,
291 "stable",
292 self._stable_repo_url)
293
294 # Returns False as no software needs to be uninstalled
295 return False
296
    async def _uninstall_sw(self, cluster_id: str, namespace: str):
        """Hook invoked when a cluster is reset; for this helm3 connector
        there is no cluster-side software to remove, so it is a no-op."""
        # nothing to do to uninstall sw
        pass
300
301 async def _instances_list(self, cluster_id: str):
302
303 # init paths, env
304 paths, env = self._init_paths_env(
305 cluster_name=cluster_id, create_if_not_exist=True
306 )
307
308 command = "{} list --all-namespaces --output yaml".format(
309 self._helm_command
310 )
311 output, _rc = await self._local_async_exec(
312 command=command, raise_exception_on_error=True, env=env
313 )
314
315 if output and len(output) > 0:
316 self.log.debug("instances list output: {}".format(output))
317 return yaml.load(output, Loader=yaml.SafeLoader)
318 else:
319 return []
320
321 def _get_inspect_command(self, inspect_command: str, kdu_model: str, repo_str: str,
322 version: str):
323 inspect_command = "{} show {} {}{} {}".format(
324 self._helm_command, inspect_command, kdu_model, repo_str, version
325 )
326 return inspect_command
327
328 async def _status_kdu(
329 self,
330 cluster_id: str,
331 kdu_instance: str,
332 namespace: str = None,
333 show_error_log: bool = False,
334 return_text: bool = False,
335 ):
336
337 self.log.debug("status of kdu_instance: {}, namespace: {} ".format(kdu_instance, namespace))
338
339 if not namespace:
340 namespace = "kube-system"
341
342 # init config, env
343 paths, env = self._init_paths_env(
344 cluster_name=cluster_id, create_if_not_exist=True
345 )
346 command = "{} status {} --namespace={} --output yaml".format(
347 self._helm_command, kdu_instance, namespace
348 )
349
350 output, rc = await self._local_async_exec(
351 command=command,
352 raise_exception_on_error=True,
353 show_error_log=show_error_log,
354 env=env
355 )
356
357 if return_text:
358 return str(output)
359
360 if rc != 0:
361 return None
362
363 data = yaml.load(output, Loader=yaml.SafeLoader)
364
365 # remove field 'notes' and manifest
366 try:
367 del data.get("info")["notes"]
368 del data["manifest"]
369 except KeyError:
370 pass
371
372 # unable to parse 'resources' as currently it is not included in helm3
373 return data
374
375 def _get_install_command(self, kdu_model: str, kdu_instance: str, namespace: str,
376 params_str: str, version: str, atomic: bool, timeout: float) -> str:
377
378 timeout_str = ""
379 if timeout:
380 timeout_str = "--timeout {}s".format(timeout)
381
382 # atomic
383 atomic_str = ""
384 if atomic:
385 atomic_str = "--atomic"
386 # namespace
387 namespace_str = ""
388 if namespace:
389 namespace_str = "--namespace {}".format(namespace)
390
391 # version
392 version_str = ""
393 if version:
394 version_str = "--version {}".format(version)
395
396 command = (
397 "{helm} install {name} {atomic} --output yaml "
398 "{params} {timeout} {ns} {model} {ver}".format(
399 helm=self._helm_command,
400 name=kdu_instance,
401 atomic=atomic_str,
402 params=params_str,
403 timeout=timeout_str,
404 ns=namespace_str,
405 model=kdu_model,
406 ver=version_str,
407 )
408 )
409 return command
410
411 def _get_upgrade_command(self, kdu_model: str, kdu_instance: str, namespace: str,
412 params_str: str, version: str, atomic: bool, timeout: float) -> str:
413
414 timeout_str = ""
415 if timeout:
416 timeout_str = "--timeout {}s".format(timeout)
417
418 # atomic
419 atomic_str = ""
420 if atomic:
421 atomic_str = "--atomic"
422
423 # version
424 version_str = ""
425 if version:
426 version_str = "--version {}".format(version)
427
428 # namespace
429 namespace_str = ""
430 if namespace:
431 namespace_str = "--namespace {}".format(namespace)
432
433 command = (
434 "{helm} upgrade {name} {model} {namespace} {atomic} --output yaml {params} "
435 "{timeout} {ver}".format(
436 helm=self._helm_command,
437 name=kdu_instance,
438 namespace=namespace_str,
439 atomic=atomic_str,
440 params=params_str,
441 timeout=timeout_str,
442 model=kdu_model,
443 ver=version_str,
444 )
445 )
446 return command
447
448 def _get_rollback_command(self, kdu_instance: str, namespace: str, revision: float) -> str:
449 return "{} rollback {} {} --namespace={} --wait".format(
450 self._helm_command, kdu_instance, revision, namespace
451 )
452
453 def _get_uninstall_command(self, kdu_instance: str, namespace: str) -> str:
454
455 return "{} uninstall {} --namespace={}".format(
456 self._helm_command, kdu_instance, namespace)
457
458 def _get_helm_chart_repos_ids(self, cluster_uuid) -> list:
459 repo_ids = []
460 cluster_filter = {"_admin.helm-chart-v3.id": cluster_uuid}
461 cluster = self.db.get_one("k8sclusters", cluster_filter)
462 if cluster:
463 repo_ids = cluster.get("_admin").get("helm_chart_repos") or []
464 return repo_ids
465 else:
466 raise K8sException(
467 "k8cluster with helm-id : {} not found".format(cluster_uuid)
468 )