- self.warning(msg='helm init failed (it was already initialized): {}'.format(e))
-
- self.log.info('K8S Helm connector initialized')
-
async def init_env(
    self,
    k8s_creds: str,
    namespace: str = 'kube-system',
    reuse_cluster_uuid=None
) -> (str, bool):
    """
    It prepares a given K8s cluster environment to run Charts on both sides:
    client (OSM)
    server (Tiller)

    :param k8s_creds: credentials to access a given K8s cluster, i.e. a valid '.kube/config'
    :param namespace: optional namespace to be used for helm. By default, 'kube-system' will be used
    :param reuse_cluster_uuid: existing cluster uuid for reuse
    :return: uuid of the K8s cluster and True if connector has installed some software in the cluster
    (on error, an exception will be raised)
    """

    # reuse the given uuid when provided, otherwise mint a fresh one
    cluster_uuid = reuse_cluster_uuid
    if not cluster_uuid:
        cluster_uuid = str(uuid4())

    self.log.debug('Initializing K8S environment. namespace: {}'.format(namespace))

    # create config filename and persist the provided credentials there;
    # context manager guarantees the file is closed even if write() raises
    kube_dir, helm_dir, config_filename, cluster_dir = \
        self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)
    with open(config_filename, 'w') as f:
        f.write(k8s_creds)

    # check if tiller pod is up in cluster
    command = '{} --kubeconfig={} --namespace={} get deployments'\
        .format(self.kubectl_command, config_filename, namespace)
    output, rc = await self._local_async_exec(command=command, raise_exception_on_error=True)

    output_table = K8sHelmConnector._output_to_table(output=output)

    # find 'tiller' pod in all pods; guard each row instead of swallowing
    # every exception with a bare except/pass
    already_initialized = False
    for row in output_table:
        if row and row[0].startswith('tiller-deploy'):
            already_initialized = True
            break

    # helm init
    n2vc_installed_sw = False
    if not already_initialized:
        # full init: installs tiller server-side, so record that we added software
        self.log.info('Initializing helm in client and server: {}'.format(cluster_uuid))
        command = '{} --kubeconfig={} --tiller-namespace={} --home={} init'\
            .format(self._helm_command, config_filename, namespace, helm_dir)
        output, rc = await self._local_async_exec(command=command, raise_exception_on_error=True)
        n2vc_installed_sw = True
    else:
        # tiller already present: only make sure the local helm home is initialized
        check_file = helm_dir + '/repository/repositories.yaml'
        if not self._check_file_exists(filename=check_file, exception_if_not_exists=False):
            self.log.info('Initializing helm in client: {}'.format(cluster_uuid))
            command = '{} --kubeconfig={} --tiller-namespace={} --home={} init --client-only'\
                .format(self._helm_command, config_filename, namespace, helm_dir)
            output, rc = await self._local_async_exec(command=command, raise_exception_on_error=True)
        else:
            self.log.info('Helm client already initialized')

    self.log.info('Cluster initialized {}'.format(cluster_uuid))

    return cluster_uuid, n2vc_installed_sw
-
async def repo_add(
    self,
    cluster_uuid: str,
    name: str,
    url: str,
    repo_type: str = 'chart'
):
    """
    Register a repository in the helm client configured for the given cluster.

    :param cluster_uuid: cluster whose helm home will be updated
    :param name: name under which the repository is registered
    :param url: repository URL
    :param repo_type: kind of repository, 'chart' by default
    """

    self.log.debug('adding {} repository {}. URL: {}'.format(repo_type, name, url))

    # resolve per-cluster paths (kubeconfig file and helm home)
    paths = self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)
    _, helm_dir, config_filename, _ = paths

    # refresh the local index of already-registered repositories first
    update_cmd = '{} --kubeconfig={} --home={} repo update'.format(self._helm_command, config_filename, helm_dir)
    self.log.debug('updating repo: {}'.format(update_cmd))
    await self._local_async_exec(command=update_cmd, raise_exception_on_error=False)

    # then register the new repository: helm repo add <name> <url>
    add_cmd = '{} --kubeconfig={} --home={} repo add {} {}'\
        .format(self._helm_command, config_filename, helm_dir, name, url)
    self.log.debug('adding repo: {}'.format(add_cmd))
    await self._local_async_exec(command=add_cmd, raise_exception_on_error=True)
-
async def repo_list(
    self,
    cluster_uuid: str
) -> list:
    """
    Get the list of registered repositories

    :return: list of registered repositories: [ (name, url) .... ]
    """

    self.log.debug('list repositories for cluster {}'.format(cluster_uuid))

    # resolve the per-cluster kubeconfig and helm home
    _, helm_dir, config_filename, _ = \
        self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)

    command = '{} --kubeconfig={} --home={} repo list --output yaml'\
        .format(self._helm_command, config_filename, helm_dir)

    output, _rc = await self._local_async_exec(command=command, raise_exception_on_error=True)
    # empty command output means no repositories are registered
    return yaml.safe_load(output) if output else []
-
async def repo_remove(
    self,
    cluster_uuid: str,
    name: str
):
    """
    Remove a repository from OSM

    :param cluster_uuid: the cluster
    :param name: repo name in OSM
    :return: True if successful
    """

    # fixed copy-pasted log message: this method removes a repository,
    # it does not list them
    self.log.debug('remove repository {} for cluster {}'.format(name, cluster_uuid))

    # config filename
    kube_dir, helm_dir, config_filename, cluster_dir = \
        self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=True)

    command = '{} --kubeconfig={} --home={} repo remove {}'\
        .format(self._helm_command, config_filename, helm_dir, name)

    await self._local_async_exec(command=command, raise_exception_on_error=True)
-
async def reset(
    self,
    cluster_uuid: str,
    force: bool = False,
    uninstall_sw: bool = False
) -> bool:
    """
    Reset the K8s environment of a cluster: optionally uninstall all releases
    and remove the tiller deployment / server-side helm software.

    :param cluster_uuid: the cluster to reset
    :param force: uninstall existing releases instead of failing when any exist
    :param uninstall_sw: also remove tiller deployment and run 'helm reset'
    :return: True on success (an exception is raised on error)
    """

    self.log.debug('Resetting K8s environment. cluster uuid: {}'.format(cluster_uuid))

    # get kube and helm directories
    kube_dir, helm_dir, config_filename, cluster_dir = \
        self._get_paths(cluster_name=cluster_uuid, create_if_not_exist=False)

    # uninstall releases if needed
    releases = await self.instances_list(cluster_uuid=cluster_uuid)
    if len(releases) > 0:
        if force:
            for r in releases:
                # bind outside the try block so the except clause can always
                # reference it (was previously unbound if r.get() raised)
                kdu_instance = r.get('Name')
                try:
                    chart = r.get('Chart')
                    self.log.debug('Uninstalling {} -> {}'.format(chart, kdu_instance))
                    await self.uninstall(cluster_uuid=cluster_uuid, kdu_instance=kdu_instance)
                except Exception as e:
                    # best-effort: keep uninstalling the remaining releases
                    self.log.error('Error uninstalling release {}: {}'.format(kdu_instance, e))
        else:
            msg = 'Cluster has releases and not force. Cannot reset K8s environment. Cluster uuid: {}'\
                .format(cluster_uuid)
            self.log.error(msg)
            raise K8sException(msg)

    if uninstall_sw:

        self.log.debug('Uninstalling tiller from cluster {}'.format(cluster_uuid))

        # find namespace for tiller pod
        command = '{} --kubeconfig={} get deployments --all-namespaces'\
            .format(self.kubectl_command, config_filename)
        output, rc = await self._local_async_exec(command=command, raise_exception_on_error=False)
        output_table = K8sHelmConnector._output_to_table(output=output)
        namespace = None
        for r in output_table:
            # guard against short/malformed rows instead of a bare except/pass
            if len(r) > 1 and 'tiller-deploy' in r[1]:
                namespace = r[0]
                break
        else:
            # for-else: loop ended without break, so tiller was not found
            msg = 'Tiller deployment not found in cluster {}'.format(cluster_uuid)
            self.log.error(msg)

        self.log.debug('namespace for tiller: {}'.format(namespace))

        force_str = '--force'

        if namespace:
            # delete tiller deployment
            self.log.debug('Deleting tiller deployment for cluster {}, namespace {}'.format(cluster_uuid, namespace))
            command = '{} --namespace {} --kubeconfig={} {} delete deployment tiller-deploy'\
                .format(self.kubectl_command, namespace, config_filename, force_str)
            await self._local_async_exec(command=command, raise_exception_on_error=False)

            # uninstall tiller from cluster
            self.log.debug('Uninstalling tiller from cluster {}'.format(cluster_uuid))
            command = '{} --kubeconfig={} --home={} reset'\
                .format(self._helm_command, config_filename, helm_dir)
            self.log.debug('resetting: {}'.format(command))
            output, rc = await self._local_async_exec(command=command, raise_exception_on_error=True)
        else:
            self.log.debug('namespace not found')

    # previously fell off the end returning None despite the '-> bool'
    # annotation; return True to honor the declared contract
    return True