Feature 10957: Set up dedicated namespace for helm based EE and add client side TLS...
[osm/LCM.git] / osm_lcm / ns.py
index 305b70b..83705d4 100644
@@ -124,7 +124,7 @@ from copy import copy, deepcopy
 from time import time
 from uuid import uuid4
 
-from random import randint
+from random import SystemRandom
 
 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
 
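
The import swap above trades the seedable, fully deterministic Mersenne Twister behind random.randint for SystemRandom, an os.urandom()-backed generator with the same API. A minimal sketch of the call-site difference (not part of the patch):

    from random import SystemRandom

    # SystemRandom.randint has the same signature as random.randint, but it
    # draws from os.urandom(), so a generated suffix cannot be predicted
    # from previously observed values.
    rng = SystemRandom()
    suffix = rng.randint(1, 10000)  # illustrative range, matching the last hunk
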
@@ -133,6 +133,7 @@ class NsLcm(LcmBase):
     SUBOPERATION_STATUS_NOT_FOUND = -1
     SUBOPERATION_STATUS_NEW = -2
     SUBOPERATION_STATUS_SKIP = -3
+    EE_TLS_NAME = "ee-tls"
     task_name_deploy_vca = "Deploying VCA"
 
     def __init__(self, msg, lcm_tasks, config: LcmCfg):
@@ -963,6 +964,10 @@ class NsLcm(LcmBase):
             image["vim_info"] = {}
         for flavor in target["flavor"]:
             flavor["vim_info"] = {}
+        if db_nsr.get("shared-volumes"):
+            target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
+            for shared_volumes in target["shared-volumes"]:
+                shared_volumes["vim_info"] = {}
         if db_nsr.get("affinity-or-anti-affinity-group"):
             target["affinity-or-anti-affinity-group"] = deepcopy(
                 db_nsr["affinity-or-anti-affinity-group"]
@@ -1233,6 +1238,15 @@ class NsLcm(LcmBase):
                         if target_vim not in ns_ags["vim_info"]:
                             ns_ags["vim_info"][target_vim] = {}
 
+                # shared-volumes
+                if vdur.get("shared-volumes-id"):
+                    for sv_id in vdur["shared-volumes-id"]:
+                        ns_sv = find_in_list(
+                            target["shared-volumes"], lambda sv: sv_id in sv["id"]
+                        )
+                        if ns_sv:
+                            ns_sv["vim_info"][target_vim] = {}
+
                 vdur["vim_info"] = {target_vim: {}}
                 # instantiation parameters
                 if vnf_params:
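
find_in_list, used in the hunk above, is a small helper imported elsewhere in ns.py: it returns the first element of a list satisfying a predicate, or None when nothing matches. A local equivalent, for illustration only:

    # Illustrative stand-in for the find_in_list helper (not part of the patch):
    def find_in_list(the_list, condition_lambda):
        # Return the first element satisfying the predicate, else None.
        for item in the_list or []:
            if condition_lambda(item):
                return item
        return None

This is what lets each shared-volumes-id on a vdur be mapped back to the NS-level target entry so its vim_info can be initialized per target VIM.
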
@@ -1817,7 +1831,7 @@ class NsLcm(LcmBase):
                     ee_id, credentials = await self.vca_map[
                         vca_type
                     ].create_execution_environment(
-                        namespace=namespace,
+                        namespace=nsr_id,
                         reuse_ee_id=ee_id,
                         db_dict=db_dict,
                         config=osm_config,
@@ -2737,13 +2751,16 @@ class NsLcm(LcmBase):
 
             # create namespace and certificate if any helm based EE is present in the NS
             if check_helm_ee_in_ns(db_vnfds):
-                # TODO: create EE namespace
+                await self.vca_map["helm-v3"].setup_ns_namespace(
+                    name=nsr_id,
+                )
                 # create TLS certificates
                 await self.vca_map["helm-v3"].create_tls_certificate(
-                    secret_name="ee-tls-{}".format(nsr_id),
+                    secret_name=self.EE_TLS_NAME,
                     dns_prefix="*",
                     nsr_id=nsr_id,
                     usage="server auth",
+                    namespace=nsr_id,
                 )
 
             nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
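
Taken together with the earlier create_execution_environment(namespace=nsr_id, ...) hunk, this gives every NS a dedicated Kubernetes namespace named after its NSR id, so the TLS secret no longer needs a per-NS name. Illustrative naming, before versus after (the id value is made up):

    nsr_id = "5c4f...d21"                    # hypothetical NS instance id
    old_secret = "ee-tls-{}".format(nsr_id)  # before: per-NS secret name, shared namespace
    new_namespace = nsr_id                   # after: dedicated namespace per NS
    new_secret = "ee-tls"                    # after: fixed name, the EE_TLS_NAME constant
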
@@ -2879,6 +2896,65 @@ class NsLcm(LcmBase):
                             stage=stage,
                         )
 
+            # Check if each VNF has an exporter for metric collection; if so, update the Prometheus job records
+            if "exporters-endpoints" in vnfd.get("df")[0]:
+                exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
+                self.logger.debug("exporter config :{}".format(exporter_config))
+                artifact_path = "{}/{}/{}".format(
+                    base_folder["folder"],
+                    base_folder["pkg-dir"],
+                    "exporter-endpoint",
+                )
+                ee_id = None
+                ee_config_descriptor = exporter_config
+                vnfr_id = db_vnfr["id"]
+                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+                    logging_text,
+                    nsr_id,
+                    vnfr_id,
+                    vdu_id=None,
+                    vdu_index=None,
+                    user=None,
+                    pub_key=None,
+                )
+                self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
+                self.logger.debug("Artifact_path:{}".format(artifact_path))
+                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+                vdu_id_for_prom = None
+                vdu_index_for_prom = None
+                for x in get_iterable(db_vnfr, "vdur"):
+                    vdu_id_for_prom = x.get("vdu-id-ref")
+                    vdu_index_for_prom = x.get("count-index")
+                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
+                    ee_id=ee_id,
+                    artifact_path=artifact_path,
+                    ee_config_descriptor=ee_config_descriptor,
+                    vnfr_id=vnfr_id,
+                    nsr_id=nsr_id,
+                    target_ip=rw_mgmt_ip,
+                    element_type="VDU",
+                    vdu_id=vdu_id_for_prom,
+                    vdu_index=vdu_index_for_prom,
+                )
+
+                self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
+                if prometheus_jobs:
+                    db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
+                    self.update_db_2(
+                        "nsrs",
+                        nsr_id,
+                        db_nsr_update,
+                    )
+
+                    for job in prometheus_jobs:
+                        self.db.set_one(
+                            "prometheus_jobs",
+                            {"job_name": job["job_name"]},
+                            job,
+                            upsert=True,
+                            fail_on_empty=False,
+                        )
+
             # Check if this NS has a charm configuration
             descriptor_config = nsd.get("ns-configuration")
             if descriptor_config and descriptor_config.get("juju"):
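
The exporters-endpoints block consumed above is a plain descriptor fragment; in the ee_id-is-None path added to extract_prometheus_scrape_jobs (last hunk of this diff) only metric-path and metric-port are read from it. A hypothetical fragment and the template variables it yields (all values made up):

    # Hypothetical df[0]["exporters-endpoints"] fragment (keys as read by the code):
    exporter_config = {"metric-path": "/metrics", "metric-port": 9100}

    # Variables handed to parse_job() when ee_id is None:
    variables = {
        "JOB_NAME": "a1b2c3",     # vnfr_id with the dashes stripped
        "TARGET_IP": "10.0.0.5",  # rw_mgmt_ip of the exporter VDU
        "TARGET_PORT": exporter_config["metric-port"],
        "METRIC_PATH": exporter_config["metric-path"],
    }
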
@@ -4292,7 +4368,7 @@ class NsLcm(LcmBase):
         # TODO vdu_index_count
         for vca in vca_deployed_list:
             if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
-                return vca["ee_id"]
+                return vca.get("ee_id")
 
     async def destroy_N2VC(
         self,
@@ -4574,9 +4650,12 @@ class NsLcm(LcmBase):
             # Delete Namespace and Certificates if necessary
             if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
                 await self.vca_map["helm-v3"].delete_tls_certificate(
-                    certificate_name=db_nslcmop["nsInstanceId"],
+                    namespace=db_nslcmop["nsInstanceId"],
+                    certificate_name=self.EE_TLS_NAME,
+                )
+                await self.vca_map["helm-v3"].delete_namespace(
+                    namespace=db_nslcmop["nsInstanceId"],
                 )
-                # TODO: Delete namespace
 
             # Delete from k8scluster
             stage[1] = "Deleting KDUs."
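
Teardown mirrors the instantiate-time setup in reverse: the fixed-name TLS secret is deleted first, then the per-NS namespace, which also garbage-collects anything the helm-based EEs left inside it. A condensed sketch of the same two calls, assuming they run inside the async termination coroutine:

    ns_id = db_nslcmop["nsInstanceId"]
    await self.vca_map["helm-v3"].delete_tls_certificate(
        namespace=ns_id, certificate_name=self.EE_TLS_NAME
    )
    await self.vca_map["helm-v3"].delete_namespace(namespace=ns_id)
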
@@ -7485,6 +7564,8 @@ class NsLcm(LcmBase):
         )
         if not job_file:
             return
+        self.logger.debug("Artifact path{}".format(artifact_path))
+        self.logger.debug("job file{}".format(job_file))
         with self.fs.file_open((artifact_path, job_file), "r") as f:
             job_data = f.read()
 
@@ -7535,21 +7616,33 @@ class NsLcm(LcmBase):
                     )
 
         # TODO get_service
-        _, _, service = ee_id.partition(".")  # remove prefix   "namespace."
-        host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
-        host_port = "80"
-        vnfr_id = vnfr_id.replace("-", "")
-        variables = {
-            "JOB_NAME": vnfr_id,
-            "TARGET_IP": target_ip,
-            "EXPORTER_POD_IP": host_name,
-            "EXPORTER_POD_PORT": host_port,
-            "NSR_ID": nsr_id,
-            "VNF_MEMBER_INDEX": vnf_member_index,
-            "VDUR_NAME": vdur_name,
-            "KDUR_NAME": kdur_name,
-            "ELEMENT_TYPE": element_type,
-        }
+        if ee_id is not None:
+            _, _, service = ee_id.partition(".")  # remove prefix   "namespace."
+            host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
+            host_port = "80"
+            vnfr_id = vnfr_id.replace("-", "")
+            variables = {
+                "JOB_NAME": vnfr_id,
+                "TARGET_IP": target_ip,
+                "EXPORTER_POD_IP": host_name,
+                "EXPORTER_POD_PORT": host_port,
+                "NSR_ID": nsr_id,
+                "VNF_MEMBER_INDEX": vnf_member_index,
+                "VDUR_NAME": vdur_name,
+                "KDUR_NAME": kdur_name,
+                "ELEMENT_TYPE": element_type,
+            }
+        else:
+            metric_path = ee_config_descriptor["metric-path"]
+            target_port = ee_config_descriptor["metric-port"]
+            vnfr_id = vnfr_id.replace("-", "")
+            variables = {
+                "JOB_NAME": vnfr_id,
+                "TARGET_IP": target_ip,
+                "TARGET_PORT": target_port,
+                "METRIC_PATH": metric_path,
+            }
+
         job_list = parse_job(job_data, variables)
         # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
         for job in job_list:
@@ -7557,7 +7650,7 @@ class NsLcm(LcmBase):
                 not isinstance(job.get("job_name"), str)
                 or vnfr_id not in job["job_name"]
             ):
-                job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
+                job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
             job["nsr_id"] = nsr_id
             job["vnfr_id"] = vnfr_id
         return job_list
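
Each returned job is then upserted into the prometheus_jobs collection by the exporter hunk earlier in this diff. A hypothetical record after the naming fallback (shape per the code above, values made up):

    job = {
        "job_name": "a1b2c3_4711",  # vnfr_id (dashes stripped) + CSPRNG suffix
        "nsr_id": "5c4f...d21",
        "vnfr_id": "a1b2c3",
        # ...plus the scrape fields parse_job() rendered from the template
    }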