Reformat LCM to standardized format
[osm/LCM.git] osm_lcm/lcm_helm_conn.py
index 845a413..40624ad 100644
@@ -35,16 +35,18 @@ from osm_lcm.data_utils.filesystem.filesystem import Filesystem
 from n2vc.n2vc_conn import N2VCConnector
 from n2vc.k8s_helm_conn import K8sHelmConnector
 from n2vc.k8s_helm3_conn import K8sHelm3Connector
-from n2vc.exceptions import N2VCBadArgumentsException, N2VCException, N2VCExecutionException
+from n2vc.exceptions import (
+    N2VCBadArgumentsException,
+    N2VCException,
+    N2VCExecutionException,
+)
 
 from osm_lcm.lcm_utils import deep_get
 
 
 def retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay"):
     def wrapper(func):
-        retry_exceptions = (
-            ConnectionRefusedError
-        )
+        retry_exceptions = ConnectionRefusedError
 
         @functools.wraps(func)
         async def wrapped(*args, **kwargs):
@@ -69,7 +71,9 @@ def retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_dela
                     continue
             else:
                 raise ConnectionRefusedError
+
         return wrapped
+
     return wrapper
 
 
@@ -85,11 +89,13 @@ class LCMHelmConn(N2VCConnector, LcmBase):
     # Time between retries, applied after a connection error is raised
     _EE_RETRY_DELAY = 10
 
-    def __init__(self,
-                 log: object = None,
-                 loop: object = None,
-                 vca_config: dict = None,
-                 on_update_db=None, ):
+    def __init__(
+        self,
+        log: object = None,
+        loop: object = None,
+        vca_config: dict = None,
+        on_update_db=None,
+    ):
         """
         Initialize EE helm connector.
         """
@@ -99,12 +105,7 @@ class LCMHelmConn(N2VCConnector, LcmBase):
 
         # parent class constructor
         N2VCConnector.__init__(
-            self,
-            log=log,
-            loop=loop,
-            on_update_db=on_update_db,
-            db=self.db,
-            fs=self.fs
+            self, log=log, loop=loop, on_update_db=on_update_db, db=self.db, fs=self.fs
         )
 
         self.vca_config = vca_config
@@ -121,14 +122,18 @@ class LCMHelmConn(N2VCConnector, LcmBase):
             self.log.debug("Initial retry time: {}".format(self._initial_retry_time))
         else:
             self._initial_retry_time = self._MAX_INITIAL_RETRY_TIME
-            self.log.debug("Applied default retry time: {}".format(self._initial_retry_time))
+            self.log.debug(
+                "Applied default retry time: {}".format(self._initial_retry_time)
+            )
 
         if self.vca_config and self.vca_config.get("eegrpctimeout"):
             self._max_retry_time = self.vca_config.get("eegrpctimeout")
             self.log.debug("Retry time: {}".format(self._max_retry_time))
         else:
             self._max_retry_time = self._MAX_RETRY_TIME
-            self.log.debug("Applied default retry time: {}".format(self._max_retry_time))
+            self.log.debug(
+                "Applied default max retry time: {}".format(self._max_retry_time)
+            )
 
         # initialize helm connector for helmv2 and helmv3
         self._k8sclusterhelm2 = K8sHelmConnector(
@@ -153,16 +158,19 @@ class LCMHelmConn(N2VCConnector, LcmBase):
         self.log.info("Helm N2VC connector initialized")
 
     # TODO - reuse_ee_id?
-    async def create_execution_environment(self,
-                                           namespace: str,
-                                           db_dict: dict,
-                                           reuse_ee_id: str = None,
-                                           progress_timeout: float = None,
-                                           total_timeout: float = None,
-                                           config: dict = None,
-                                           artifact_path: str = None,
-                                           vca_type: str = None,
-                                           *kargs, **kwargs) -> (str, dict):
+    async def create_execution_environment(
+        self,
+        namespace: str,
+        db_dict: dict,
+        reuse_ee_id: str = None,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        config: dict = None,
+        artifact_path: str = None,
+        vca_type: str = None,
+        *kargs,
+        **kwargs,
+    ) -> (str, dict):
         """
         Creates a new helm execution environment deploying the helm-chart indicated in the
         artifact_path
@@ -184,8 +192,7 @@ class LCMHelmConn(N2VCConnector, LcmBase):
 
         self.log.info(
             "create_execution_environment: namespace: {}, artifact_path: {}, db_dict: {}, "
-            "reuse_ee_id: {}".format(
-                namespace, artifact_path, db_dict, reuse_ee_id)
+            "reuse_ee_id: {}".format(namespace, artifact_path, db_dict, reuse_ee_id)
         )
 
         # Validate artifact-path is provided
@@ -233,23 +240,29 @@ class LCMHelmConn(N2VCConnector, LcmBase):
                     db_dict=db_dict,
                     kdu_model=full_path,
                 )
-                await self._k8sclusterhelm2.install(system_cluster_uuid, kdu_model=full_path,
-                                                    kdu_instance=helm_id,
-                                                    namespace=self._KUBECTL_OSM_NAMESPACE,
-                                                    params=config,
-                                                    db_dict=db_dict,
-                                                    timeout=progress_timeout)
+                await self._k8sclusterhelm2.install(
+                    system_cluster_uuid,
+                    kdu_model=full_path,
+                    kdu_instance=helm_id,
+                    namespace=self._KUBECTL_OSM_NAMESPACE,
+                    params=config,
+                    db_dict=db_dict,
+                    timeout=progress_timeout,
+                )
             else:
                 helm_id = self._k8sclusterhelm2.generate_kdu_instance_name(
                     db_dict=db_dict,
                     kdu_model=full_path,
                 )
-                await self._k8sclusterhelm3.install(system_cluster_uuid, kdu_model=full_path,
-                                                    kdu_instance=helm_id,
-                                                    namespace=self._KUBECTL_OSM_NAMESPACE,
-                                                    params=config,
-                                                    db_dict=db_dict,
-                                                    timeout=progress_timeout)
+                await self._k8sclusterhelm3.install(
+                    system_cluster_uuid,
+                    kdu_model=full_path,
+                    kdu_instance=helm_id,
+                    namespace=self._KUBECTL_OSM_NAMESPACE,
+                    params=config,
+                    db_dict=db_dict,
+                    timeout=progress_timeout,
+                )
 
             ee_id = "{}:{}.{}".format(vca_type, self._KUBECTL_OSM_NAMESPACE, helm_id)
             return ee_id, None
@@ -259,9 +272,16 @@ class LCMHelmConn(N2VCConnector, LcmBase):
             self.log.error("Error deploying chart ee: {}".format(e), exc_info=True)
             raise N2VCException("Error deploying chart ee: {}".format(e))
 
-    async def register_execution_environment(self, namespace: str, credentials: dict, db_dict: dict,
-                                             progress_timeout: float = None, total_timeout: float = None,
-                                             *kargs, **kwargs) -> str:
+    async def register_execution_environment(
+        self,
+        namespace: str,
+        credentials: dict,
+        db_dict: dict,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        *kargs,
+        **kwargs,
+    ) -> str:
         # nothing to do
         pass
 
@@ -301,8 +321,7 @@ class LCMHelmConn(N2VCConnector, LcmBase):
         """
 
         self.log.info(
-            "get_ee_ssh_public_key: ee_id: {}, db_dict: {}".format(
-                ee_id, db_dict)
+            "get_ee_ssh_public_key: ee_id: {}, db_dict: {}".format(ee_id, db_dict)
         )
 
         # check arguments
@@ -325,14 +344,14 @@ class LCMHelmConn(N2VCConnector, LcmBase):
             raise N2VCException("Error obtaining ee ssh_ke: {}".format(e))
 
     async def exec_primitive(
-            self,
-            ee_id: str,
-            primitive_name: str,
-            params_dict: dict,
-            db_dict: dict = None,
-            progress_timeout: float = None,
-            total_timeout: float = None,
-            **kwargs,
+        self,
+        ee_id: str,
+        primitive_name: str,
+        params_dict: dict,
+        db_dict: dict = None,
+        progress_timeout: float = None,
+        total_timeout: float = None,
+        **kwargs,
     ) -> str:
         """
         Execute a primitive in the execution environment
@@ -354,9 +373,11 @@ class LCMHelmConn(N2VCConnector, LcmBase):
         :returns str: primitive result, if ok. It raises exceptions in case of fail
         """
 
-        self.log.info("exec primitive for ee_id : {}, primitive_name: {}, params_dict: {}, db_dict: {}".format(
-            ee_id, primitive_name, params_dict, db_dict
-        ))
+        self.log.info(
+            "exec primitive for ee_id: {}, primitive_name: {}, params_dict: {}, db_dict: {}".format(
+                ee_id, primitive_name, params_dict, db_dict
+            )
+        )
 
         # check arguments
         if ee_id is None or len(ee_id) == 0:
@@ -380,12 +401,20 @@ class LCMHelmConn(N2VCConnector, LcmBase):
         if primitive_name == "config":
             try:
                 # Execute config primitive, higher timeout to check the case ee is starting
-                status, detailed_message = await self._execute_config_primitive(ip_addr, params_dict, db_dict=db_dict)
-                self.log.debug("Executed config primitive ee_id_ {}, status: {}, message: {}".format(
-                    ee_id, status, detailed_message))
+                status, detailed_message = await self._execute_config_primitive(
+                    ip_addr, params_dict, db_dict=db_dict
+                )
+                self.log.debug(
+                    "Executed config primitive ee_id: {}, status: {}, message: {}".format(
+                        ee_id, status, detailed_message
+                    )
+                )
                 if status != "OK":
-                    self.log.error("Error configuring helm ee, status: {}, message: {}".format(
-                        status, detailed_message))
+                    self.log.error(
+                        "Error configuring helm ee, status: {}, message: {}".format(
+                            status, detailed_message
+                        )
+                    )
                     raise N2VCExecutionException(
                         message="Error configuring helm ee_id: {}, status: {}, message: {}: ".format(
                             ee_id, status, detailed_message
@@ -395,23 +424,26 @@ class LCMHelmConn(N2VCConnector, LcmBase):
             except Exception as e:
                 self.log.error("Error configuring helm ee: {}".format(e))
                 raise N2VCExecutionException(
-                    message="Error configuring helm ee_id: {}, {}".format(
-                        ee_id, e
-                    ),
+                    message="Error configuring helm ee_id: {}, {}".format(ee_id, e),
                     primitive_name=primitive_name,
                 )
             return "CONFIG OK"
         else:
             try:
                 # Execute primitive
-                status, detailed_message = await self._execute_primitive(ip_addr, primitive_name,
-                                                                         params_dict, db_dict=db_dict)
-                self.log.debug("Executed primitive {} ee_id_ {}, status: {}, message: {}".format(
-                    primitive_name, ee_id, status, detailed_message))
+                status, detailed_message = await self._execute_primitive(
+                    ip_addr, primitive_name, params_dict, db_dict=db_dict
+                )
+                self.log.debug(
+                    "Executed primitive {} ee_id: {}, status: {}, message: {}".format(
+                        primitive_name, ee_id, status, detailed_message
+                    )
+                )
                 if status != "OK" and status != "PROCESSING":
                     self.log.error(
                         "Execute primitive {} returned not ok status: {}, message: {}".format(
-                            primitive_name, status, detailed_message)
+                            primitive_name, status, detailed_message
+                        )
                     )
                     raise N2VCExecutionException(
                         message="Execute primitive {} returned not ok status: {}, message: {}".format(
@@ -479,10 +511,14 @@ class LCMHelmConn(N2VCConnector, LcmBase):
         except N2VCException:
             raise
         except Exception as e:
-            self.log.error("Error deleting ee id: {}: {}".format(ee_id, e), exc_info=True)
+            self.log.error(
+                "Error deleting ee id: {}: {}".format(ee_id, e), exc_info=True
+            )
             raise N2VCException("Error deleting ee id {}: {}".format(ee_id, e))
 
-    async def delete_namespace(self, namespace: str, db_dict: dict = None, total_timeout: float = None):
+    async def delete_namespace(
+        self, namespace: str, db_dict: dict = None, total_timeout: float = None
+    ):
        # method not implemented for this connector; execution environments must be deleted individually
         pass
 
@@ -495,7 +531,8 @@ class LCMHelmConn(N2VCConnector, LcmBase):
         progress_timeout: float = None,
         total_timeout: float = None,
         config: dict = None,
-        *kargs, **kwargs
+        *kargs,
+        **kwargs,
     ) -> str:
         pass
 
@@ -512,13 +549,19 @@ class LCMHelmConn(N2VCConnector, LcmBase):
 
     @retryer(max_wait_time_var="_initial_retry_time", delay_time_var="_retry_delay")
     async def _execute_config_primitive(self, ip_addr, params, db_dict=None):
-        return await self._execute_primitive_internal(ip_addr, "config", params, db_dict=db_dict)
+        return await self._execute_primitive_internal(
+            ip_addr, "config", params, db_dict=db_dict
+        )
 
     @retryer(max_wait_time_var="_max_retry_time", delay_time_var="_retry_delay")
     async def _execute_primitive(self, ip_addr, primitive_name, params, db_dict=None):
-        return await self._execute_primitive_internal(ip_addr, primitive_name, params, db_dict=db_dict)
+        return await self._execute_primitive_internal(
+            ip_addr, primitive_name, params, db_dict=db_dict
+        )
 
-    async def _execute_primitive_internal(self, ip_addr, primitive_name, params, db_dict=None):
+    async def _execute_primitive_internal(
+        self, ip_addr, primitive_name, params, db_dict=None
+    ):
 
         channel = Channel(ip_addr, self._ee_service_port)
         try:
@@ -526,16 +569,25 @@ class LCMHelmConn(N2VCConnector, LcmBase):
             async with stub.RunPrimitive.open() as stream:
                 primitive_id = str(uuid.uuid1())
                 result = None
-                self.log.debug("Execute primitive internal: id:{}, name:{}, params: {}".
-                               format(primitive_id, primitive_name, params))
+                self.log.debug(
+                    "Execute primitive internal: id:{}, name:{}, params: {}".format(
+                        primitive_id, primitive_name, params
+                    )
+                )
                 await stream.send_message(
-                    PrimitiveRequest(id=primitive_id, name=primitive_name, params=yaml.dump(params)), end=True)
+                    PrimitiveRequest(
+                        id=primitive_id, name=primitive_name, params=yaml.dump(params)
+                    ),
+                    end=True,
+                )
                 async for reply in stream:
                     self.log.debug("Received reply: {}".format(reply))
                     result = reply
                     # If db_dict provided write notifs in database
                     if db_dict:
-                        self._write_op_detailed_status(db_dict, reply.status, reply.detailed_message)
+                        self._write_op_detailed_status(
+                            db_dict, reply.status, reply.detailed_message
+                        )
                 if result:
                     return reply.status, reply.detailed_message
                 else:
@@ -564,24 +616,34 @@ class LCMHelmConn(N2VCConnector, LcmBase):
 
     async def _get_system_cluster_id(self):
         if not self._system_cluster_id:
-            db_k8cluster = self.db.get_one("k8sclusters", {"name": self._KUBECTL_OSM_CLUSTER_NAME})
+            db_k8cluster = self.db.get_one(
+                "k8sclusters", {"name": self._KUBECTL_OSM_CLUSTER_NAME}
+            )
             k8s_hc_id = deep_get(db_k8cluster, ("_admin", "helm-chart-v3", "id"))
             if not k8s_hc_id:
                 try:
                     # backward compatibility for existing clusters that have not been initialized for helm v3
                     cluster_id = db_k8cluster.get("_id")
                     k8s_credentials = yaml.safe_dump(db_k8cluster.get("credentials"))
-                    k8s_hc_id, uninstall_sw = await self._k8sclusterhelm3.init_env(k8s_credentials,
-                                                                                   reuse_cluster_uuid=cluster_id)
-                    db_k8scluster_update = {"_admin.helm-chart-v3.error_msg": None,
-                                            "_admin.helm-chart-v3.id": k8s_hc_id,
-                                            "_admin.helm-chart-v3}.created": uninstall_sw,
-                                            "_admin.helm-chart-v3.operationalState": "ENABLED"}
+                    k8s_hc_id, uninstall_sw = await self._k8sclusterhelm3.init_env(
+                        k8s_credentials, reuse_cluster_uuid=cluster_id
+                    )
+                    db_k8scluster_update = {
+                        "_admin.helm-chart-v3.error_msg": None,
+                        "_admin.helm-chart-v3.id": k8s_hc_id,
+                        "_admin.helm-chart-v3.created": uninstall_sw,
+                        "_admin.helm-chart-v3.operationalState": "ENABLED",
+                    }
                     self.update_db_2("k8sclusters", cluster_id, db_k8scluster_update)
                 except Exception as e:
-                    self.log.error("error initializing helm-v3 cluster: {}".format(str(e)))
-                    raise N2VCException("K8s system cluster '{}' has not been initialized for helm-chart-v3".format(
-                        cluster_id))
+                    self.log.error(
+                        "error initializing helm-v3 cluster: {}".format(str(e))
+                    )
+                    raise N2VCException(
+                        "K8s system cluster '{}' has not been initialized for helm-chart-v3".format(
+                            cluster_id
+                        )
+                    )
             self._system_cluster_id = k8s_hc_id
         return self._system_cluster_id
 
@@ -591,6 +653,6 @@ class LCMHelmConn(N2VCConnector, LcmBase):
         namespace.helm_id for backward compatibility
         If present, the helm version can be helm-v3 or helm (old helm-v2 version)
         """
-        version, _, part_id = ee_id.rpartition(':')
-        namespace, _, helm_id = part_id.rpartition('.')
+        version, _, part_id = ee_id.rpartition(":")
+        namespace, _, helm_id = part_id.rpartition(".")
         return version, namespace, helm_id
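
A minimal standalone sketch of the ee_id parsing in the final hunk, to make the
format concrete. The ee_id layout comes from create_execution_environment above,
which builds "{vca_type}:{namespace}.{helm_id}"; the sample values below are
hypothetical.

    def get_ee_id_parts(ee_id):
        # "helm-v3:osm.eechart-1234" -> ("helm-v3", "osm", "eechart-1234")
        version, _, part_id = ee_id.rpartition(":")
        namespace, _, helm_id = part_id.rpartition(".")
        return version, namespace, helm_id

    # Stored ids may also lack the version prefix (the backward-compatible
    # namespace.helm_id form), in which case rpartition(":") leaves version empty.
    assert get_ee_id_parts("helm-v3:osm.eechart-1234") == ("helm-v3", "osm", "eechart-1234")
    assert get_ee_id_parts("osm.eechart-1234") == ("", "osm", "eechart-1234")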