# Fixed issue canceling status task
# [osm/N2VC.git] / n2vc / k8s_helm_conn.py
1 ##
2 # Copyright 2019 Telefonica Investigacion y Desarrollo, S.A.U.
3 # This file is part of OSM
4 # All Rights Reserved.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
15 # implied.
16 # See the License for the specific language governing permissions and
17 # limitations under the License.
18 #
19 # For those usages not covered by the Apache License, Version 2.0 please
20 # contact with: nfvlabs@tid.es
21 ##
22
23 import subprocess
24 import os
25 import shutil
26 import asyncio
27 import time
28 import yaml
29 from uuid import uuid4
30 import random
31 from n2vc.k8s_conn import K8sConnector
32 from n2vc.exceptions import K8sException
33
34
35 class K8sHelmConnector(K8sConnector):
36
37 """
38 ##################################################################################################
39 ########################################## P U B L I C ###########################################
40 ##################################################################################################
41 """
42
    def __init__(
            self,
            fs: object,
            db: object,
            kubectl_command: str = '/usr/bin/kubectl',
            helm_command: str = '/usr/bin/helm',
            log: object = None,
            on_update_db=None
    ):
        """
        Initialize the K8s Helm connector.

        :param fs: file system for kubernetes and helm configuration
        :param db: database object to write current operation status
        :param kubectl_command: path to kubectl executable
        :param helm_command: path to helm executable
        :param log: logger
        :param on_update_db: callback called when k8s connector updates database
        """

        # parent class
        K8sConnector.__init__(
            self,
            db=db,
            log=log,
            on_update_db=on_update_db
        )

        self.info('Initializing K8S Helm connector')

        # random numbers for release name generation
        random.seed(time.time())

        # the file system
        self.fs = fs

        # exception if kubectl is not installed
        self.kubectl_command = kubectl_command
        self._check_file_exists(filename=kubectl_command, exception_if_not_exists=True)

        # exception if helm is not installed
        self._helm_command = helm_command
        self._check_file_exists(filename=helm_command, exception_if_not_exists=True)

        # initialize helm client-only
        self.debug('Initializing helm client-only...')
        command = '{} init --client-only'.format(self._helm_command)
        try:
            # fire-and-forget: the task is scheduled on the running loop and never
            # awaited here, so a failure surfaces inside the task, NOT in this except
            asyncio.ensure_future(self._local_async_exec(command=command, raise_exception_on_error=False))
            # loop = asyncio.get_event_loop()
            # loop.run_until_complete(self._local_async_exec(command=command, raise_exception_on_error=False))
        except Exception as e:
            self.warning(msg='helm init failed (it was already initialized): {}'.format(e))

        self.info('K8S Helm connector initialized')
97
98 async def init_env(
99 self,
100 k8s_creds: str,
101 namespace: str = 'kube-system',
102 reuse_cluster_uuid=None
103 ) -> (str, bool):
104 """
105 It prepares a given K8s cluster environment to run Charts on both sides:
106 client (OSM)
107 server (Tiller)
108
109 :param k8s_creds: credentials to access a given K8s cluster, i.e. a valid '.kube/config'
110 :param namespace: optional namespace to be used for helm. By default, 'kube-system' will be used
111 :param reuse_cluster_uuid: existing cluster uuid for reuse
112 :return: uuid of the K8s cluster and True if connector has installed some software in the cluster
113 (on error, an exception will be raised)
114 """
115
116 cluster_uuid = reuse_cluster_uuid
117 if not cluster_uuid:
118 cluster_uuid = str(uuid4())
119
120 self.debug('Initializing K8S environment. namespace: {}'.format(namespace))
121
122 # create config filename
123 kube_dir, helm_dir, config_filename, cluster_dir = \
124 self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)
125 f = open(config_filename, "w")
126 f.write(k8s_creds)
127 f.close()
128
129 # check if tiller pod is up in cluster
130 command = '{} --kubeconfig={} --namespace={} get deployments'\
131 .format(self.kubectl_command, config_filename, namespace)
132 output, rc = await self._local_async_exec(command=command, raise_exception_on_error=True)
133
134 output_table = K8sHelmConnector._output_to_table(output=output)
135
136 # find 'tiller' pod in all pods
137 already_initialized = False
138 try:
139 for row in output_table:
140 if row[0].startswith('tiller-deploy'):
141 already_initialized = True
142 break
143 except Exception as e:
144 pass
145
146 # helm init
147 n2vc_installed_sw = False
148 if not already_initialized:
149 self.info('Initializing helm in client and server: {}'.format(cluster_uuid))
150 command = '{} --kubeconfig={} --tiller-namespace={} --home={} init'\
151 .format(self._helm_command, config_filename, namespace, helm_dir)
152 output, rc = await self._local_async_exec(command=command, raise_exception_on_error=True)
153 n2vc_installed_sw = True
154 else:
155 # check client helm installation
156 check_file = helm_dir + '/repository/repositories.yaml'
157 if not self._check_file_exists(filename=check_file, exception_if_not_exists=False):
158 self.info('Initializing helm in client: {}'.format(cluster_uuid))
159 command = '{} --kubeconfig={} --tiller-namespace={} --home={} init --client-only'\
160 .format(self._helm_command, config_filename, namespace, helm_dir)
161 output, rc = await self._local_async_exec(command=command, raise_exception_on_error=True)
162 else:
163 self.info('Helm client already initialized')
164
165 self.info('Cluster initialized {}'.format(cluster_uuid))
166
167 return cluster_uuid, n2vc_installed_sw
168
169 async def repo_add(
170 self,
171 cluster_uuid: str,
172 name: str,
173 url: str,
174 repo_type: str = 'chart'
175 ):
176
177 self.debug('adding {} repository {}. URL: {}'.format(repo_type, name, url))
178
179 # config filename
180 kube_dir, helm_dir, config_filename, cluster_dir = \
181 self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)
182
183 # helm repo update
184 command = '{} --kubeconfig={} --home={} repo update'.format(self._helm_command, config_filename, helm_dir)
185 self.debug('updating repo: {}'.format(command))
186 await self._local_async_exec(command=command, raise_exception_on_error=False)
187
188 # helm repo add name url
189 command = '{} --kubeconfig={} --home={} repo add {} {}'\
190 .format(self._helm_command, config_filename, helm_dir, name, url)
191 self.debug('adding repo: {}'.format(command))
192 await self._local_async_exec(command=command, raise_exception_on_error=True)
193
194 async def repo_list(
195 self,
196 cluster_uuid: str
197 ) -> list:
198 """
199 Get the list of registered repositories
200
201 :return: list of registered repositories: [ (name, url) .... ]
202 """
203
204 self.debug('list repositories for cluster {}'.format(cluster_uuid))
205
206 # config filename
207 kube_dir, helm_dir, config_filename, cluster_dir = \
208 self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)
209
210 command = '{} --kubeconfig={} --home={} repo list --output yaml'\
211 .format(self._helm_command, config_filename, helm_dir)
212
213 output, rc = await self._local_async_exec(command=command, raise_exception_on_error=True)
214 if output and len(output) > 0:
215 return yaml.load(output, Loader=yaml.SafeLoader)
216 else:
217 return []
218
219 async def repo_remove(
220 self,
221 cluster_uuid: str,
222 name: str
223 ):
224 """
225 Remove a repository from OSM
226
227 :param cluster_uuid: the cluster
228 :param name: repo name in OSM
229 :return: True if successful
230 """
231
232 self.debug('list repositories for cluster {}'.format(cluster_uuid))
233
234 # config filename
235 kube_dir, helm_dir, config_filename, cluster_dir = \
236 self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)
237
238 command = '{} --kubeconfig={} --home={} repo remove {}'\
239 .format(self._helm_command, config_filename, helm_dir, name)
240
241 await self._local_async_exec(command=command, raise_exception_on_error=True)
242
    async def reset(
            self,
            cluster_uuid: str,
            force: bool = False,
            uninstall_sw: bool = False
    ) -> bool:
        """
        Tear down a cluster environment: optionally uninstall all releases and tiller,
        then remove the local per-cluster directory.

        :param cluster_uuid: cluster to reset
        :param force: uninstall existing releases instead of raising
        :param uninstall_sw: also remove tiller (server side) from the cluster
        :return: True on success
        :raises K8sException: if releases exist and force is False
        """

        self.debug('Resetting K8s environment. cluster uuid: {}'.format(cluster_uuid))

        # get kube and helm directories
        kube_dir, helm_dir, config_filename, cluster_dir = \
            self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=False)

        # uninstall releases if needed
        releases = await self.instances_list(cluster_uuid=cluster_uuid)
        if len(releases) > 0:
            if force:
                for r in releases:
                    try:
                        kdu_instance = r.get('Name')
                        chart = r.get('Chart')
                        self.debug('Uninstalling {} -> {}'.format(chart, kdu_instance))
                        await self.uninstall(cluster_uuid=cluster_uuid, kdu_instance=kdu_instance)
                    except Exception as e:
                        # best effort: log and keep uninstalling the remaining releases
                        self.error('Error uninstalling release {}: {}'.format(kdu_instance, e))
            else:
                msg = 'Cluster has releases and not force. Cannot reset K8s environment. Cluster uuid: {}'\
                    .format(cluster_uuid)
                self.error(msg)
                raise K8sException(msg)

        if uninstall_sw:

            self.debug('Uninstalling tiller from cluster {}'.format(cluster_uuid))

            # find namespace for tiller pod
            command = '{} --kubeconfig={} get deployments --all-namespaces'\
                .format(self.kubectl_command, config_filename)
            output, rc = await self._local_async_exec(command=command, raise_exception_on_error=False)
            output_table = K8sHelmConnector._output_to_table(output=output)
            namespace = None
            for r in output_table:
                try:
                    # column 1 is the deployment name, column 0 its namespace
                    if 'tiller-deploy' in r[1]:
                        namespace = r[0]
                        break
                except Exception as e:
                    # short rows (e.g. blank lines) are skipped
                    pass
            else:
                # for/else: runs only when the loop finished WITHOUT break,
                # i.e. no tiller deployment was found (error is logged, not raised)
                msg = 'Tiller deployment not found in cluster {}'.format(cluster_uuid)
                self.error(msg)

            self.debug('namespace for tiller: {}'.format(namespace))

            # flag appended to the kubectl delete command below
            force_str = '--force'

            if namespace:
                # delete tiller deployment
                self.debug('Deleting tiller deployment for cluster {}, namespace {}'.format(cluster_uuid, namespace))
                command = '{} --namespace {} --kubeconfig={} {} delete deployment tiller-deploy'\
                    .format(self.kubectl_command, namespace, config_filename, force_str)
                await self._local_async_exec(command=command, raise_exception_on_error=False)

                # uninstall tiller from cluster
                self.debug('Uninstalling tiller from cluster {}'.format(cluster_uuid))
                command = '{} --kubeconfig={} --home={} reset'\
                    .format(self._helm_command, config_filename, helm_dir)
                self.debug('resetting: {}'.format(command))
                output, rc = await self._local_async_exec(command=command, raise_exception_on_error=True)
            else:
                self.debug('namespace not found')

        # delete cluster directory
        # NOTE(review): 'dir' shadows the builtin of the same name
        dir = self.fs.path + '/' + cluster_uuid
        self.debug('Removing directory {}'.format(dir))
        shutil.rmtree(dir, ignore_errors=True)

        return True
321
    async def install(
            self,
            cluster_uuid: str,
            kdu_model: str,
            atomic: bool = True,
            timeout: float = 300,
            params: dict = None,
            db_dict: dict = None,
            kdu_name: str = None
    ):
        """
        Deploy a chart as a new release in the target cluster.

        :param cluster_uuid: cluster to deploy into
        :param kdu_model: chart reference; may carry a version as '<chart>:<version>'
        :param atomic: pass --atomic to helm and stream status to the DB while running
        :param timeout: value for helm --timeout
        :param params: chart parameters, serialized to a temporary values file
        :param db_dict: DB location for status updates
        :param kdu_name: not used by this implementation (kept for interface compatibility)
        :return: the generated release name (kdu_instance)
        :raises K8sException: when the helm command fails
        """

        self.debug('installing {} in cluster {}'.format(kdu_model, cluster_uuid))

        # config filename
        kube_dir, helm_dir, config_filename, cluster_dir = \
            self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)

        # params to str (written to a temporary values file, removed below)
        # params_str = K8sHelmConnector._params_to_set_option(params)
        params_str, file_to_delete = self._params_to_file_option(cluster_uuid=cluster_uuid, params=params)

        timeout_str = ''
        if timeout:
            timeout_str = '--timeout {}'.format(timeout)

        # atomic
        atomic_str = ''
        if atomic:
            atomic_str = '--atomic'

        # version: split an optional ':<version>' suffix off the chart reference
        version_str = ''
        if ':' in kdu_model:
            parts = kdu_model.split(sep=':')
            if len(parts) == 2:
                version_str = '--version {}'.format(parts[1])
                kdu_model = parts[0]

        # generate a name for the release. Then, check if already exists
        kdu_instance = None
        while kdu_instance is None:
            kdu_instance = K8sHelmConnector._generate_release_name(kdu_model)
            try:
                result = await self._status_kdu(
                    cluster_uuid=cluster_uuid,
                    kdu_instance=kdu_instance,
                    show_error_log=False
                )
                if result is not None:
                    # instance already exists: generate a new one
                    kdu_instance = None
            except K8sException:
                # status query failed -> the name is free, keep it
                pass

        # helm repo install
        command = '{} install {} --output yaml --kubeconfig={} --home={} {} {} --name={} {} {}'\
            .format(self._helm_command, atomic_str, config_filename, helm_dir,
                    params_str, timeout_str, kdu_instance, kdu_model, version_str)
        self.debug('installing: {}'.format(command))

        if atomic:
            # exec helm in a task
            exec_task = asyncio.ensure_future(
                coro_or_future=self._local_async_exec(command=command, raise_exception_on_error=False)
            )
            # write status in another task: polls and stores status while helm runs
            status_task = asyncio.ensure_future(
                coro_or_future=self._store_status(
                    cluster_uuid=cluster_uuid,
                    kdu_instance=kdu_instance,
                    db_dict=db_dict,
                    operation='install',
                    run_once=False
                )
            )

            # wait for execution task
            await asyncio.wait([exec_task])

            # cancel status task; _store_status catches CancelledError and returns.
            # NOTE(review): the cancelled task is not awaited afterwards -- confirm no
            # 'task was destroyed' warnings appear at loop shutdown
            status_task.cancel()

            output, rc = exec_task.result()

        else:

            output, rc = await self._local_async_exec(command=command, raise_exception_on_error=False)

        # remove temporal values yaml file
        if file_to_delete:
            os.remove(file_to_delete)

        # write final status
        await self._store_status(
            cluster_uuid=cluster_uuid,
            kdu_instance=kdu_instance,
            db_dict=db_dict,
            operation='install',
            run_once=True,
            check_every=0
        )

        if rc != 0:
            msg = 'Error executing command: {}\nOutput: {}'.format(command, output)
            self.error(msg)
            raise K8sException(msg)

        self.debug('Returning kdu_instance {}'.format(kdu_instance))
        return kdu_instance
431
432 async def instances_list(
433 self,
434 cluster_uuid: str
435 ) -> list:
436 """
437 returns a list of deployed releases in a cluster
438
439 :param cluster_uuid: the cluster
440 :return:
441 """
442
443 self.debug('list releases for cluster {}'.format(cluster_uuid))
444
445 # config filename
446 kube_dir, helm_dir, config_filename, cluster_dir = \
447 self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)
448
449 command = '{} --kubeconfig={} --home={} list --output yaml'\
450 .format(self._helm_command, config_filename, helm_dir)
451
452 output, rc = await self._local_async_exec(command=command, raise_exception_on_error=True)
453
454 if output and len(output) > 0:
455 return yaml.load(output, Loader=yaml.SafeLoader).get('Releases')
456 else:
457 return []
458
    async def upgrade(
            self,
            cluster_uuid: str,
            kdu_instance: str,
            kdu_model: str = None,
            atomic: bool = True,
            timeout: float = 300,
            params: dict = None,
            db_dict: dict = None
    ):
        """
        Upgrade an existing release to a (possibly new) chart/version.

        :param cluster_uuid: cluster of the release
        :param kdu_instance: release name to upgrade
        :param kdu_model: chart reference; may carry a version as '<chart>:<version>'
        :param atomic: pass --atomic to helm and stream status to the DB while running
        :param timeout: value for helm --timeout
        :param params: chart parameters, serialized to a temporary values file
        :param db_dict: DB location for status updates
        :return: the new revision number (0 if the release cannot be found afterwards)
        :raises K8sException: when the helm command fails
        """

        self.debug('upgrading {} in cluster {}'.format(kdu_model, cluster_uuid))

        # config filename
        kube_dir, helm_dir, config_filename, cluster_dir = \
            self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)

        # params to str (written to a temporary values file, removed below)
        # params_str = K8sHelmConnector._params_to_set_option(params)
        params_str, file_to_delete = self._params_to_file_option(cluster_uuid=cluster_uuid, params=params)

        timeout_str = ''
        if timeout:
            timeout_str = '--timeout {}'.format(timeout)

        # atomic
        atomic_str = ''
        if atomic:
            atomic_str = '--atomic'

        # version: split an optional ':<version>' suffix off the chart reference
        version_str = ''
        if kdu_model and ':' in kdu_model:
            parts = kdu_model.split(sep=':')
            if len(parts) == 2:
                version_str = '--version {}'.format(parts[1])
                kdu_model = parts[0]

        # helm repo upgrade
        command = '{} upgrade {} --output yaml --kubeconfig={} --home={} {} {} {} {} {}'\
            .format(self._helm_command, atomic_str, config_filename, helm_dir,
                    params_str, timeout_str, kdu_instance, kdu_model, version_str)
        self.debug('upgrading: {}'.format(command))

        if atomic:

            # exec helm in a task
            exec_task = asyncio.ensure_future(
                coro_or_future=self._local_async_exec(command=command, raise_exception_on_error=False)
            )
            # write status in another task: polls and stores status while helm runs
            status_task = asyncio.ensure_future(
                coro_or_future=self._store_status(
                    cluster_uuid=cluster_uuid,
                    kdu_instance=kdu_instance,
                    db_dict=db_dict,
                    operation='upgrade',
                    run_once=False
                )
            )

            # wait for execution task
            await asyncio.wait([exec_task])

            # cancel status task; _store_status catches CancelledError and returns
            status_task.cancel()
            output, rc = exec_task.result()

        else:

            output, rc = await self._local_async_exec(command=command, raise_exception_on_error=False)

        # remove temporal values yaml file
        if file_to_delete:
            os.remove(file_to_delete)

        # write final status
        await self._store_status(
            cluster_uuid=cluster_uuid,
            kdu_instance=kdu_instance,
            db_dict=db_dict,
            operation='upgrade',
            run_once=True,
            check_every=0
        )

        if rc != 0:
            msg = 'Error executing command: {}\nOutput: {}'.format(command, output)
            self.error(msg)
            raise K8sException(msg)

        # return new revision number
        instance = await self.get_instance_info(cluster_uuid=cluster_uuid, kdu_instance=kdu_instance)
        if instance:
            revision = int(instance.get('Revision'))
            self.debug('New revision: {}'.format(revision))
            return revision
        else:
            return 0
558
    async def rollback(
            self,
            cluster_uuid: str,
            kdu_instance: str,
            revision=0,
            db_dict: dict = None
    ):
        """
        Roll a release back to a previous revision.

        :param cluster_uuid: cluster of the release
        :param kdu_instance: release name to roll back
        :param revision: target revision (helm interprets 0 as the previous one)
        :param db_dict: DB location for status updates
        :return: the new revision number (0 if the release cannot be found afterwards)
        :raises K8sException: when the helm command fails
        """

        self.debug('rollback kdu_instance {} to revision {} from cluster {}'
                   .format(kdu_instance, revision, cluster_uuid))

        # config filename
        kube_dir, helm_dir, config_filename, cluster_dir = \
            self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)

        command = '{} rollback --kubeconfig={} --home={} {} {} --wait'\
            .format(self._helm_command, config_filename, helm_dir, kdu_instance, revision)

        # exec helm in a task
        exec_task = asyncio.ensure_future(
            coro_or_future=self._local_async_exec(command=command, raise_exception_on_error=False)
        )
        # write status in another task: polls and stores status while helm runs
        status_task = asyncio.ensure_future(
            coro_or_future=self._store_status(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                db_dict=db_dict,
                operation='rollback',
                run_once=False
            )
        )

        # wait for execution task
        await asyncio.wait([exec_task])

        # cancel status task; _store_status catches CancelledError and returns
        status_task.cancel()

        output, rc = exec_task.result()

        # write final status
        await self._store_status(
            cluster_uuid=cluster_uuid,
            kdu_instance=kdu_instance,
            db_dict=db_dict,
            operation='rollback',
            run_once=True,
            check_every=0
        )

        if rc != 0:
            msg = 'Error executing command: {}\nOutput: {}'.format(command, output)
            self.error(msg)
            raise K8sException(msg)

        # return new revision number
        instance = await self.get_instance_info(cluster_uuid=cluster_uuid, kdu_instance=kdu_instance)
        if instance:
            revision = int(instance.get('Revision'))
            self.debug('New revision: {}'.format(revision))
            return revision
        else:
            return 0
623
624 async def uninstall(
625 self,
626 cluster_uuid: str,
627 kdu_instance: str
628 ):
629 """
630 Removes an existing KDU instance. It would implicitly use the `delete` call (this call would happen
631 after all _terminate-config-primitive_ of the VNF are invoked).
632
633 :param cluster_uuid: UUID of a K8s cluster known by OSM
634 :param kdu_instance: unique name for the KDU instance to be deleted
635 :return: True if successful
636 """
637
638 self.debug('uninstall kdu_instance {} from cluster {}'.format(kdu_instance, cluster_uuid))
639
640 # config filename
641 kube_dir, helm_dir, config_filename, cluster_dir = \
642 self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)
643
644 command = '{} --kubeconfig={} --home={} delete --purge {}'\
645 .format(self._helm_command, config_filename, helm_dir, kdu_instance)
646
647 output, rc = await self._local_async_exec(command=command, raise_exception_on_error=True)
648
649 return self._output_to_table(output)
650
651 async def inspect_kdu(
652 self,
653 kdu_model: str,
654 repo_url: str = None
655 ) -> str:
656
657 self.debug('inspect kdu_model {} from (optional) repo: {}'.format(kdu_model, repo_url))
658
659 return await self._exec_inspect_comand(inspect_command='', kdu_model=kdu_model, repo_url=repo_url)
660
661 async def values_kdu(
662 self,
663 kdu_model: str,
664 repo_url: str = None
665 ) -> str:
666
667 self.debug('inspect kdu_model values {} from (optional) repo: {}'.format(kdu_model, repo_url))
668
669 return await self._exec_inspect_comand(inspect_command='values', kdu_model=kdu_model, repo_url=repo_url)
670
671 async def help_kdu(
672 self,
673 kdu_model: str,
674 repo_url: str = None
675 ) -> str:
676
677 self.debug('inspect kdu_model {} readme.md from repo: {}'.format(kdu_model, repo_url))
678
679 return await self._exec_inspect_comand(inspect_command='readme', kdu_model=kdu_model, repo_url=repo_url)
680
681 async def status_kdu(
682 self,
683 cluster_uuid: str,
684 kdu_instance: str
685 ) -> str:
686
687 # call internal function
688 return await self._status_kdu(
689 cluster_uuid=cluster_uuid,
690 kdu_instance=kdu_instance,
691 show_error_log=True,
692 return_text=True
693 )
694
695 """
696 ##################################################################################################
697 ########################################## P R I V A T E #########################################
698 ##################################################################################################
699 """
700
701 async def _exec_inspect_comand(
702 self,
703 inspect_command: str,
704 kdu_model: str,
705 repo_url: str = None
706 ):
707
708 repo_str = ''
709 if repo_url:
710 repo_str = ' --repo {}'.format(repo_url)
711 idx = kdu_model.find('/')
712 if idx >= 0:
713 idx += 1
714 kdu_model = kdu_model[idx:]
715
716 inspect_command = '{} inspect {} {}{}'.format(self._helm_command, inspect_command, kdu_model, repo_str)
717 output, rc = await self._local_async_exec(command=inspect_command, encode_utf8=True)
718
719 return output
720
    async def _status_kdu(
            self,
            cluster_uuid: str,
            kdu_instance: str,
            show_error_log: bool = False,
            return_text: bool = False
    ):
        """
        Run `helm status` for a release and return its output.

        :param cluster_uuid: cluster of the release
        :param kdu_instance: release name
        :param show_error_log: log helm failures at debug level
        :param return_text: if True return the raw output as str; otherwise a parsed
            dict with 'notes' removed and 'resources' converted to a table
        :return: str or dict (see return_text)
        :raises K8sException: on helm failure (raise_exception_on_error is True)
        """

        self.debug('status of kdu_instance {}'.format(kdu_instance))

        # config filename
        kube_dir, helm_dir, config_filename, cluster_dir = \
            self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)

        command = '{} --kubeconfig={} --home={} status {} --output yaml'\
            .format(self._helm_command, config_filename, helm_dir, kdu_instance)

        output, rc = await self._local_async_exec(
            command=command,
            raise_exception_on_error=True,
            show_error_log=show_error_log
        )

        if return_text:
            return str(output)

        # NOTE(review): with raise_exception_on_error=True a non-zero rc raises above,
        # so this guard appears unreachable -- kept as defensive code
        if rc != 0:
            return None

        data = yaml.load(output, Loader=yaml.SafeLoader)

        # remove field 'notes' (free text, not useful for status tracking)
        try:
            del data.get('info').get('status')['notes']
        except KeyError:
            pass

        # parse field 'resources' from text into a table of rows/cells
        try:
            resources = str(data.get('info').get('status').get('resources'))
            resource_table = self._output_to_table(resources)
            data.get('info').get('status')['resources'] = resource_table
        except Exception as e:
            pass

        return data
767
768 async def get_instance_info(
769 self,
770 cluster_uuid: str,
771 kdu_instance: str
772 ):
773 instances = await self.instances_list(cluster_uuid=cluster_uuid)
774 for instance in instances:
775 if instance.get('Name') == kdu_instance:
776 return instance
777 self.debug('Instance {} not found'.format(kdu_instance))
778 return None
779
780 @staticmethod
781 def _generate_release_name(
782 chart_name: str
783 ):
784 # check embeded chart (file or dir)
785 if chart_name.startswith('/'):
786 # extract file or directory name
787 chart_name = chart_name[chart_name.rfind('/')+1:]
788 # check URL
789 elif '://' in chart_name:
790 # extract last portion of URL
791 chart_name = chart_name[chart_name.rfind('/')+1:]
792
793 name = ''
794 for c in chart_name:
795 if c.isalpha() or c.isnumeric():
796 name += c
797 else:
798 name += '-'
799 if len(name) > 35:
800 name = name[0:35]
801
802 # if does not start with alpha character, prefix 'a'
803 if not name[0].isalpha():
804 name = 'a' + name
805
806 name += '-'
807
808 def get_random_number():
809 r = random.randrange(start=1, stop=99999999)
810 s = str(r)
811 s = s.rjust(10, '0')
812 return s
813
814 name = name + get_random_number()
815 return name.lower()
816
817 async def _store_status(
818 self,
819 cluster_uuid: str,
820 operation: str,
821 kdu_instance: str,
822 check_every: float = 10,
823 db_dict: dict = None,
824 run_once: bool = False
825 ):
826 while True:
827 try:
828 await asyncio.sleep(check_every)
829 detailed_status = await self.status_kdu(cluster_uuid=cluster_uuid, kdu_instance=kdu_instance)
830 status = detailed_status.get('info').get('Description')
831 print('=' * 60)
832 self.debug('STATUS:\n{}'.format(status))
833 self.debug('DETAILED STATUS:\n{}'.format(detailed_status))
834 print('=' * 60)
835 # write status to db
836 result = await self.write_app_status_to_db(
837 db_dict=db_dict,
838 status=str(status),
839 detailed_status=str(detailed_status),
840 operation=operation)
841 if not result:
842 self.info('Error writing in database. Task exiting...')
843 return
844 except asyncio.CancelledError:
845 self.debug('Task cancelled')
846 return
847 except Exception as e:
848 pass
849 finally:
850 if run_once:
851 return
852
853 async def _is_install_completed(
854 self,
855 cluster_uuid: str,
856 kdu_instance: str
857 ) -> bool:
858
859 status = await self._status_kdu(cluster_uuid=cluster_uuid, kdu_instance=kdu_instance, return_text=False)
860
861 # extract info.status.resources-> str
862 # format:
863 # ==> v1/Deployment
864 # NAME READY UP-TO-DATE AVAILABLE AGE
865 # halting-horse-mongodb 0/1 1 0 0s
866 # halting-petit-mongodb 1/1 1 0 0s
867 # blank line
868 resources = K8sHelmConnector._get_deep(status, ('info', 'status', 'resources'))
869
870 # convert to table
871 resources = K8sHelmConnector._output_to_table(resources)
872
873 num_lines = len(resources)
874 index = 0
875 while index < num_lines:
876 try:
877 line1 = resources[index]
878 index += 1
879 # find '==>' in column 0
880 if line1[0] == '==>':
881 line2 = resources[index]
882 index += 1
883 # find READY in column 1
884 if line2[1] == 'READY':
885 # read next lines
886 line3 = resources[index]
887 index += 1
888 while len(line3) > 1 and index < num_lines:
889 ready_value = line3[1]
890 parts = ready_value.split(sep='/')
891 current = int(parts[0])
892 total = int(parts[1])
893 if current < total:
894 self.debug('NOT READY:\n {}'.format(line3))
895 ready = False
896 line3 = resources[index]
897 index += 1
898
899 except Exception as e:
900 pass
901
902 return ready
903
904 @staticmethod
905 def _get_deep(dictionary: dict, members: tuple):
906 target = dictionary
907 value = None
908 try:
909 for m in members:
910 value = target.get(m)
911 if not value:
912 return None
913 else:
914 target = value
915 except Exception as e:
916 pass
917 return value
918
919 # find key:value in several lines
920 @staticmethod
921 def _find_in_lines(p_lines: list, p_key: str) -> str:
922 for line in p_lines:
923 try:
924 if line.startswith(p_key + ':'):
925 parts = line.split(':')
926 the_value = parts[1].strip()
927 return the_value
928 except Exception as e:
929 # ignore it
930 pass
931 return None
932
933 # params for use in -f file
934 # returns values file option and filename (in order to delete it at the end)
935 def _params_to_file_option(self, cluster_uuid: str, params: dict) -> (str, str):
936
937 if params and len(params) > 0:
938 kube_dir, helm_dir, config_filename, cluster_dir = \
939 self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)
940
941 def get_random_number():
942 r = random.randrange(start=1, stop=99999999)
943 s = str(r)
944 while len(s) < 10:
945 s = '0' + s
946 return s
947
948 params2 = dict()
949 for key in params:
950 value = params.get(key)
951 if '!!yaml' in str(value):
952 value = yaml.load(value[7:])
953 params2[key] = value
954
955 values_file = get_random_number() + '.yaml'
956 with open(values_file, 'w') as stream:
957 yaml.dump(params2, stream, indent=4, default_flow_style=False)
958
959 return '-f {}'.format(values_file), values_file
960
961 return '', None
962
963 # params for use in --set option
964 @staticmethod
965 def _params_to_set_option(params: dict) -> str:
966 params_str = ''
967 if params and len(params) > 0:
968 start = True
969 for key in params:
970 value = params.get(key, None)
971 if value is not None:
972 if start:
973 params_str += '--set '
974 start = False
975 else:
976 params_str += ','
977 params_str += '{}={}'.format(key, value)
978 return params_str
979
980 @staticmethod
981 def _output_to_lines(output: str) -> list:
982 output_lines = list()
983 lines = output.splitlines(keepends=False)
984 for line in lines:
985 line = line.strip()
986 if len(line) > 0:
987 output_lines.append(line)
988 return output_lines
989
990 @staticmethod
991 def _output_to_table(output: str) -> list:
992 output_table = list()
993 lines = output.splitlines(keepends=False)
994 for line in lines:
995 line = line.replace('\t', ' ')
996 line_list = list()
997 output_table.append(line_list)
998 cells = line.split(sep=' ')
999 for cell in cells:
1000 cell = cell.strip()
1001 if len(cell) > 0:
1002 line_list.append(cell)
1003 return output_table
1004
    def _get_paths(self, cluster_name: str, create_if_not_exist: bool = False) -> (str, str, str, str):
        """
        Returns kube and helm directories

        :param cluster_name: name (uuid) of the cluster; used as the directory name
        :param create_if_not_exist: create each missing directory instead of failing
        :return: kube, helm directories, config filename and cluster dir.
        Raises exception if not exist and cannot create
        """

        base = self.fs.path
        # normalize: strip a single trailing path separator
        if base.endswith("/") or base.endswith("\\"):
            base = base[:-1]

        # base dir for cluster
        cluster_dir = base + '/' + cluster_name
        if create_if_not_exist and not os.path.exists(cluster_dir):
            self.debug('Creating dir {}'.format(cluster_dir))
            os.makedirs(cluster_dir)
        if not os.path.exists(cluster_dir):
            msg = 'Base cluster dir {} does not exist'.format(cluster_dir)
            self.error(msg)
            raise K8sException(msg)

        # kube dir (holds the kubeconfig written by init_env)
        kube_dir = cluster_dir + '/' + '.kube'
        if create_if_not_exist and not os.path.exists(kube_dir):
            self.debug('Creating dir {}'.format(kube_dir))
            os.makedirs(kube_dir)
        if not os.path.exists(kube_dir):
            msg = 'Kube config dir {} does not exist'.format(kube_dir)
            self.error(msg)
            raise K8sException(msg)

        # helm home dir (passed to helm via --home)
        helm_dir = cluster_dir + '/' + '.helm'
        if create_if_not_exist and not os.path.exists(helm_dir):
            self.debug('Creating dir {}'.format(helm_dir))
            os.makedirs(helm_dir)
        if not os.path.exists(helm_dir):
            msg = 'Helm config dir {} does not exist'.format(helm_dir)
            self.error(msg)
            raise K8sException(msg)

        config_filename = kube_dir + '/config'
        return kube_dir, helm_dir, config_filename, cluster_dir
1051
1052 @staticmethod
1053 def _remove_multiple_spaces(str):
1054 str = str.strip()
1055 while ' ' in str:
1056 str = str.replace(' ', ' ')
1057 return str
1058
1059 def _local_exec(
1060 self,
1061 command: str
1062 ) -> (str, int):
1063 command = K8sHelmConnector._remove_multiple_spaces(command)
1064 self.debug('Executing sync local command: {}'.format(command))
1065 # raise exception if fails
1066 output = ''
1067 try:
1068 output = subprocess.check_output(command, shell=True, universal_newlines=True)
1069 return_code = 0
1070 self.debug(output)
1071 except Exception as e:
1072 return_code = 1
1073
1074 return output, return_code
1075
    async def _local_async_exec(
            self,
            command: str,
            raise_exception_on_error: bool = False,
            show_error_log: bool = True,
            encode_utf8: bool = False
    ) -> (str, int):
        """
        Run a local command asynchronously (no shell) and capture its output.

        :param command: command line; split on single spaces, so arguments must not
            contain embedded spaces
        :param raise_exception_on_error: raise K8sException instead of returning rc != 0
        :param show_error_log: log failing commands at debug level
        :param encode_utf8: post-process output through an encode/str round trip
        :return: (output, return_code); ('', -1) on unexpected failure when not raising
        :raises K8sException: on failure when raise_exception_on_error is True
        """

        command = K8sHelmConnector._remove_multiple_spaces(command)
        self.debug('Executing async local command: {}'.format(command))

        # split command into argv (simple whitespace split, no quoting support)
        command = command.split(sep=' ')

        try:
            process = await asyncio.create_subprocess_exec(
                *command,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )

            # wait for command terminate
            stdout, stderr = await process.communicate()

            return_code = process.returncode

            output = ''
            if stdout:
                output = stdout.decode('utf-8').strip()
                # output = stdout.decode()
            if stderr:
                # NOTE: stderr REPLACES stdout here -- stdout is discarded when both are set
                output = stderr.decode('utf-8').strip()
                # output = stderr.decode()

            if return_code != 0 and show_error_log:
                self.debug('Return code (FAIL): {}\nOutput:\n{}'.format(return_code, output))
            else:
                self.debug('Return code: {}'.format(return_code))

            if raise_exception_on_error and return_code != 0:
                raise K8sException(output)

            if encode_utf8:
                # round-trip through bytes and back; restores literal newlines
                output = output.encode('utf-8').strip()
                output = str(output).replace('\\n', '\n')

            return output, return_code

        except asyncio.CancelledError:
            # never swallow cancellation
            raise
        except K8sException:
            raise
        except Exception as e:
            msg = 'Exception executing command: {} -> {}'.format(command, e)
            self.error(msg)
            if raise_exception_on_error:
                raise K8sException(e) from e
            else:
                return '', -1
1135
1136 def _check_file_exists(self, filename: str, exception_if_not_exists: bool = False):
1137 self.debug('Checking if file {} exists...'.format(filename))
1138 if os.path.exists(filename):
1139 return True
1140 else:
1141 msg = 'File {} does not exist'.format(filename)
1142 if exception_if_not_exists:
1143 self.error(msg)
1144 raise K8sException(msg)