X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=n2vc%2Fk8s_helm_conn.py;h=13a69c4fb748a2dd512459293843b0b6d01e901c;hb=05ddb45012985d97d321499867b40eeee4b6b43d;hp=a900d97fdaf634a693611318c801a79dbc242dc9;hpb=b8ba1af7b8fb360df043b0aec74afc928fbd363a;p=osm%2FN2VC.git

diff --git a/n2vc/k8s_helm_conn.py b/n2vc/k8s_helm_conn.py
index a900d97..13a69c4 100644
--- a/n2vc/k8s_helm_conn.py
+++ b/n2vc/k8s_helm_conn.py
@@ -280,40 +280,39 @@ class K8sHelmConnector(K8sConnector):
     ) -> bool:
 
         namespace, cluster_id = self._get_namespace_cluster_id(cluster_uuid)
-        self.log.debug(
-            "Resetting K8s environment. cluster uuid: {}".format(cluster_id)
-        )
+        self.log.debug("Resetting K8s environment. cluster uuid: {} uninstall={}"
+                       .format(cluster_id, uninstall_sw))
 
         # get kube and helm directories
         _kube_dir, helm_dir, config_filename, _cluster_dir = self._get_paths(
             cluster_name=cluster_id, create_if_not_exist=False
         )
 
-        # uninstall releases if needed
-        releases = await self.instances_list(cluster_uuid=cluster_uuid)
-        if len(releases) > 0:
-            if force:
-                for r in releases:
-                    try:
-                        kdu_instance = r.get("Name")
-                        chart = r.get("Chart")
-                        self.log.debug(
-                            "Uninstalling {} -> {}".format(chart, kdu_instance)
-                        )
-                        await self.uninstall(
-                            cluster_uuid=cluster_uuid, kdu_instance=kdu_instance
-                        )
-                    except Exception as e:
-                        self.log.error(
-                            "Error uninstalling release {}: {}".format(kdu_instance, e)
-                        )
-            else:
-                msg = (
-                    "Cluster has releases and not force. Cannot reset K8s "
-                    "environment. Cluster uuid: {}"
-                ).format(cluster_id)
-                self.log.error(msg)
-                raise K8sException(msg)
+        # uninstall releases if needed.
+        if uninstall_sw:
+            releases = await self.instances_list(cluster_uuid=cluster_uuid)
+            if len(releases) > 0:
+                if force:
+                    for r in releases:
+                        try:
+                            kdu_instance = r.get("Name")
+                            chart = r.get("Chart")
+                            self.log.debug(
+                                "Uninstalling {} -> {}".format(chart, kdu_instance)
+                            )
+                            await self.uninstall(
+                                cluster_uuid=cluster_uuid, kdu_instance=kdu_instance
+                            )
+                        except Exception as e:
+                            self.log.error(
+                                "Error uninstalling release {}: {}".format(kdu_instance, e)
+                            )
+                else:
+                    msg = (
+                        "Cluster uuid: {} has releases and not force. "
Leaving K8s helm environment" + ).format(cluster_id) + self.log.warn(msg) + uninstall_sw = False # Allow to remove k8s cluster without removing Tiller if uninstall_sw: @@ -838,6 +837,7 @@ class K8sHelmConnector(K8sConnector): kdu_instance: str, namespace: str) -> list: + _, cluster_id = self._get_namespace_cluster_id(cluster_uuid) self.log.debug( "get_services: cluster_uuid: {}, kdu_instance: {}".format( cluster_uuid, kdu_instance @@ -845,7 +845,7 @@ class K8sHelmConnector(K8sConnector): ) status = await self._status_kdu( - cluster_uuid, kdu_instance, return_text=False + cluster_id, kdu_instance, return_text=False ) service_names = self._parse_helm_status_service_info(status) @@ -867,8 +867,9 @@ class K8sHelmConnector(K8sConnector): ) # get paths + _, cluster_id = self._get_namespace_cluster_id(cluster_uuid) _kube_dir, helm_dir, config_filename, _cluster_dir = self._get_paths( - cluster_name=cluster_uuid, create_if_not_exist=True + cluster_name=cluster_id, create_if_not_exist=True ) command = "{} --kubeconfig={} --namespace={} get service {} -o=yaml".format( @@ -897,7 +898,7 @@ class K8sHelmConnector(K8sConnector): async def synchronize_repos(self, cluster_uuid: str): _, cluster_id = self._get_namespace_cluster_id(cluster_uuid) - self.log.debug("syncronize repos for cluster helm-id: {}",) + self.log.debug("syncronize repos for cluster helm-id: {}".format(cluster_id)) try: update_repos_timeout = ( 300 # max timeout to sync a single repos, more than this is too much @@ -915,8 +916,8 @@ class K8sHelmConnector(K8sConnector): # elements that must be deleted deleted_repo_list = [] added_repo_dict = {} - self.log.debug("helm_chart_repos: {}".format(nbi_repo_list)) - self.log.debug("helm_charts_added: {}".format(cluster_repo_dict)) + # self.log.debug("helm_chart_repos: {}".format(nbi_repo_list)) + # self.log.debug("helm_charts_added: {}".format(cluster_repo_dict)) # obtain repos to add: registered by nbi but not added repos_to_add = [ @@ -933,7 +934,8 @@ class K8sHelmConnector(K8sConnector): # delete repos: must delete first then add because there may be # different repos with same name but # different id and url - self.log.debug("repos to delete: {}".format(repos_to_delete)) + if repos_to_delete: + self.log.debug("repos to delete: {}".format(repos_to_delete)) for repo_id in repos_to_delete: # try to delete repos try: @@ -956,7 +958,8 @@ class K8sHelmConnector(K8sConnector): deleted_repo_list.append(repo_id) # add repos - self.log.debug("repos to add: {}".format(repos_to_add)) + if repos_to_add: + self.log.debug("repos to add: {}".format(repos_to_add)) for repo_id in repos_to_add: # obtain the repo data from the db # if there is an error getting the repo in the database we will @@ -1135,6 +1138,7 @@ class K8sHelmConnector(K8sConnector): db_dict: dict = None, run_once: bool = False, ): + previous_exception = None while True: try: await asyncio.sleep(check_every) @@ -1158,8 +1162,10 @@ class K8sHelmConnector(K8sConnector): self.log.debug("Task cancelled") return except Exception as e: - self.log.debug("_store_status exception: {}".format(str(e)), exc_info=True) - pass + # log only once in the while loop + if str(previous_exception) != str(e): + self.log.debug("_store_status exception: {}".format(str(e))) + previous_exception = e finally: if run_once: return