X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_lcm%2Fns.py;h=1714985affa0ab9180d62677078b15fdc3ce6183;hb=HEAD;hp=71ede2868e37c2ccc0c51474996a9dcff9e8588a;hpb=0c9435ef9d70b276c1d88255c5426d4d3933b8a0;p=osm%2FLCM.git

diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py
index 71ede28..1714985 100644
--- a/osm_lcm/ns.py
+++ b/osm_lcm/ns.py
@@ -23,6 +23,7 @@ import yaml
 import logging
 import logging.handlers
 import traceback
+import ipaddress
 import json
 from jinja2 import (
     Environment,
@@ -221,6 +222,18 @@ class NsLcm(LcmBase):
         if not isinstance(ip_mac, str):
             return ip_mac
         try:
+            next_ipv6 = None
+            next_ipv4 = None
+            dual_ip = ip_mac.split(";")
+            if len(dual_ip) == 2:
+                for ip in dual_ip:
+                    if ipaddress.ip_address(ip).version == 6:
+                        ipv6 = ipaddress.IPv6Address(ip)
+                        next_ipv6 = str(ipaddress.IPv6Address(int(ipv6) + 1))
+                    elif ipaddress.ip_address(ip).version == 4:
+                        ipv4 = ipaddress.IPv4Address(ip)
+                        next_ipv4 = str(ipaddress.IPv4Address(int(ipv4) + 1))
+                return [next_ipv4, next_ipv6]
             # try with ipv4 look for last dot
             i = ip_mac.rfind(".")
             if i > 0:
@@ -252,14 +265,23 @@ class NsLcm(LcmBase):
             nsr = self.db.get_one(table="nsrs", q_filter=filter)
             current_ns_status = nsr.get("nsState")

-            # get vca status for NS
+            # First, we need to verify if the current vcaStatus is null, because if that is the case,
+            # MongoDB will not be able to create the fields used within the update key in the database
+            if not nsr.get("vcaStatus"):
+                # Write an empty dictionary to the vcaStatus field, if its value is null
+                self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})
+
+            # Get vca status for NS
             status_dict = await self.n2vc.get_status(
                 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
             )

-            # vcaStatus
+            # Update the vcaStatus
+            db_key = f"vcaStatus.{nsr_id}.VNF"
             db_dict = dict()
-            db_dict["vcaStatus"] = status_dict
+
+            db_dict[db_key] = status_dict[nsr_id]
+            await self.n2vc.update_vca_status(db_dict[db_key], vca_id=vca_id)

             # update configurationStatus for this VCA
             try:
@@ -363,9 +385,27 @@ class NsLcm(LcmBase):
                 vca_id=vca_id,
             )

-            # vcaStatus
+            # First, we need to verify if the current vcaStatus is null, because if that is the case,
+            # MongoDB will not be able to create the fields used within the update key in the database
+            nsr = self.db.get_one(table="nsrs", q_filter=filter)
+            if not nsr.get("vcaStatus"):
+                # Write an empty dictionary to the vcaStatus field, if its value is null
+                self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})
+
+            # Update the vcaStatus
+            db_key = f"vcaStatus.{nsr_id}.KNF"
             db_dict = dict()
-            db_dict["vcaStatus"] = {nsr_id: vca_status}
+
+            db_dict[db_key] = vca_status
+
+            if cluster_type in ("juju-bundle", "juju"):
+                # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
+                # status in a similar way between Juju Bundles and Helm Charts on this side
+                await self.k8sclusterjuju.update_vca_status(
+                    db_dict[db_key],
+                    kdu_instance,
+                    vca_id=vca_id,
+                )

             self.logger.debug(
                 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
@@ -3951,25 +3991,12 @@ class NsLcm(LcmBase):
                 )
             )
             ee_descriptor_id = ee_item.get("id")
-            if ee_item.get("juju"):
-                vca_name = ee_item["juju"].get("charm")
-                if get_charm_name:
-                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
-                vca_type = (
-                    "lxc_proxy_charm"
-                    if ee_item["juju"].get("charm") is not None
-                    else "native_charm"
-                )
-                if ee_item["juju"].get("cloud") == "k8s":
-                    vca_type = "k8s_proxy_charm"
-                elif ee_item["juju"].get("proxy") is False:
-                    vca_type = "native_charm"
-            elif ee_item.get("helm-chart"):
-                vca_name = ee_item["helm-chart"]
-                vca_type = "helm-v3"
-            else:
+            vca_name, charm_name, vca_type = self.get_vca_info(
+                ee_item, db_nsr, get_charm_name
+            )
+            if not vca_type:
                 self.logger.debug(
-                    logging_text + "skipping non juju neither charm configuration"
+                    logging_text + "skipping, non juju/charm/helm configuration"
                 )
                 continue

@@ -4522,25 +4549,25 @@ class NsLcm(LcmBase):
             if nsr_deployed.get("VCA"):
                 stage[1] = "Deleting all execution environments."
                 self.logger.debug(logging_text + stage[1])
-                vca_id = self.get_vca_id({}, db_nsr)
-                task_delete_ee = asyncio.ensure_future(
-                    asyncio.wait_for(
-                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
-                        timeout=self.timeout.charm_delete,
+                helm_vca_list = get_deployed_vca(db_nsr, {"type": "helm-v3"})
+                if helm_vca_list:
+                    # Delete Namespace and Certificates
+                    await self.vca_map["helm-v3"].delete_tls_certificate(
+                        namespace=db_nslcmop["nsInstanceId"],
+                        certificate_name=self.EE_TLS_NAME,
                     )
-                )
-                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
-                tasks_dict_info[task_delete_ee] = "Terminating all VCA"
-
-                # Delete Namespace and Certificates if necessary
-                if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
-                    await self.vca_map["helm-v3"].delete_tls_certificate(
-                        namespace=db_nslcmop["nsInstanceId"],
-                        certificate_name=self.EE_TLS_NAME,
-                    )
-                    await self.vca_map["helm-v3"].delete_namespace(
-                        namespace=db_nslcmop["nsInstanceId"],
-                    )
+                    await self.vca_map["helm-v3"].delete_namespace(
+                        namespace=db_nslcmop["nsInstanceId"],
+                    )
+                else:
+                    vca_id = self.get_vca_id({}, db_nsr)
+                    task_delete_ee = asyncio.ensure_future(
+                        asyncio.wait_for(
+                            self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
+                            timeout=self.timeout.charm_delete,
+                        )
+                    )
+                    tasks_dict_info[task_delete_ee] = "Terminating all VCA"

             # Delete from k8scluster
             stage[1] = "Deleting KDUs."
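
The first hunk above extends increment_ip_mac() so that a dual-stack value of the form "<IPv4>;<IPv6>" is split on ";" and each address is advanced with the standard ipaddress module before the pre-existing string handling is tried. The following standalone sketch illustrates only that technique; the helper name next_dual_stack_ip and the sample addresses are invented for this note and are not part of the patch:

    import ipaddress

    def next_dual_stack_ip(ip_pair: str) -> list:
        # Split a "v4;v6" pair and increment each address by one,
        # mirroring the dual-stack branch added to increment_ip_mac above.
        next_ipv4 = next_ipv6 = None
        for ip in ip_pair.split(";"):
            parsed = ipaddress.ip_address(ip)
            if parsed.version == 6:
                next_ipv6 = str(ipaddress.IPv6Address(int(parsed) + 1))
            elif parsed.version == 4:
                next_ipv4 = str(ipaddress.IPv4Address(int(parsed) + 1))
        return [next_ipv4, next_ipv6]

    print(next_dual_stack_ip("10.0.0.1;2001:db8::1"))  # ['10.0.0.2', '2001:db8::2']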
@@ -5006,7 +5033,7 @@ class NsLcm(LcmBase):
                     vca_id=vca_id,
                     cluster_type=cluster_type,
                 )
-        else:
+        if db_nsr["_admin"]["deployed"]["VCA"]:
             for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
                 table, filter = "nsrs", {"_id": nsr_id}
                 path = "_admin.deployed.VCA.{}.".format(vca_index)
@@ -5893,6 +5920,12 @@ class NsLcm(LcmBase):
                             )
                         )

+                    step = "Checking whether the descriptor has SFC"
+                    if db_nsr.get("nsd", {}).get("vnffgd"):
+                        raise LcmException(
+                            "Ns update is not allowed for NS with SFC"
+                        )
+
                     # There is no change in the charm package, then redeploy the VNF
                     # based on new descriptor
                     step = "Redeploying VNF"
@@ -6250,6 +6283,7 @@ class NsLcm(LcmBase):
         old_operational_status = ""
         old_config_status = ""
         nsi_id = None
+        prom_job_name = ""
         try:
             # wait for any previous tasks in process
             step = "Waiting for previous operations to terminate"
@@ -6272,6 +6306,10 @@ class NsLcm(LcmBase):
             old_operational_status = db_nsr["operational-status"]
             old_config_status = db_nsr["config-status"]

+            step = "Checking whether the descriptor has SFC"
+            if db_nsr.get("nsd", {}).get("vnffgd"):
+                raise LcmException("Scaling is not allowed for NS with SFC")
+
             step = "Parsing scaling parameters"
             db_nsr_update["operational-status"] = "scaling"
             self.update_db_2("nsrs", nsr_id, db_nsr_update)
@@ -6322,7 +6360,11 @@ class NsLcm(LcmBase):
                     nsr_id,
                     {
                         "_admin.scaling-group": [
-                            {"name": scaling_group, "nb-scale-op": 0}
+                            {
+                                "name": scaling_group,
+                                "vnf_index": vnf_index,
+                                "nb-scale-op": 0,
+                            }
                         ]
                     },
@@ -6331,7 +6373,10 @@ class NsLcm(LcmBase):
                 for admin_scale_index, admin_scale_info in enumerate(
                     db_nsr["_admin"]["scaling-group"]
                 ):
-                    if admin_scale_info["name"] == scaling_group:
+                    if (
+                        admin_scale_info["name"] == scaling_group
+                        and admin_scale_info["vnf_index"] == vnf_index
+                    ):
                         nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
                         break
                 else:  # not found, set index one plus last element and add new entry with the name
@@ -6339,6 +6384,9 @@ class NsLcm(LcmBase):
                     db_nsr_update[
                         "_admin.scaling-group.{}.name".format(admin_scale_index)
                     ] = scaling_group
+                    db_nsr_update[
+                        "_admin.scaling-group.{}.vnf_index".format(admin_scale_index)
+                    ] = vnf_index

             vca_scaling_info = []
             scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
@@ -6360,6 +6408,15 @@ class NsLcm(LcmBase):
                     vdud = get_vdu(db_vnfd, vdu_delta["id"])
                     # vdu_index also provides the number of instance of the targeted vdu
                     vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
+                    if vdu_index <= len(db_vnfr["vdur"]):
+                        vdu_name_id = db_vnfr["vdur"][vdu_index - 1]["vdu-name"]
+                        prom_job_name = (
+                            db_vnfr["_id"] + vdu_name_id + str(vdu_index - 1)
+                        )
+                        prom_job_name = prom_job_name.replace("_", "")
+                        prom_job_name = prom_job_name.replace("-", "")
+                    else:
+                        prom_job_name = None
                     cloud_init_text = self._get_vdu_cloud_init_content(
                         vdud, db_vnfd
                     )
@@ -7148,7 +7205,69 @@ class NsLcm(LcmBase):
                 db_nsr_update["config-status"] = old_config_status
             scale_process = None
             # POST-SCALE END
+            # Check if each vnf has exporter for metric collection if so update prometheus job records
+            if scaling_type == "SCALE_OUT":
+                if "exporters-endpoints" in db_vnfd.get("df")[0]:
+                    vnfr_id = db_vnfr["id"]
+                    db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+                    exporter_config = db_vnfd.get("df")[0].get("exporters-endpoints")
+                    self.logger.debug("exporter config :{}".format(exporter_config))
+                    artifact_path = "{}/{}/{}".format(
+                        base_folder["folder"],
+                        base_folder["pkg-dir"],
+                        "exporter-endpoint",
+                    )
+                    ee_id = None
+                    ee_config_descriptor = exporter_config
+                    rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+                        logging_text,
+                        nsr_id,
+                        vnfr_id,
+                        vdu_id=db_vnfr["vdur"][-1]["vdu-id-ref"],
+                        vdu_index=db_vnfr["vdur"][-1]["count-index"],
+                        user=None,
+                        pub_key=None,
+                    )
+                    self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
+                    self.logger.debug("Artifact_path:{}".format(artifact_path))
+                    vdu_id_for_prom = None
+                    vdu_index_for_prom = None
+                    for x in get_iterable(db_vnfr, "vdur"):
+                        vdu_id_for_prom = x.get("vdu-id-ref")
+                        vdu_index_for_prom = x.get("count-index")
+                    vnfr_id = vnfr_id + vdu_id + str(vdu_index)
+                    vnfr_id = vnfr_id.replace("_", "")
+                    prometheus_jobs = await self.extract_prometheus_scrape_jobs(
+                        ee_id=ee_id,
+                        artifact_path=artifact_path,
+                        ee_config_descriptor=ee_config_descriptor,
+                        vnfr_id=vnfr_id,
+                        nsr_id=nsr_id,
+                        target_ip=rw_mgmt_ip,
+                        element_type="VDU",
+                        vdu_id=vdu_id_for_prom,
+                        vdu_index=vdu_index_for_prom,
+                    )
+                    self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
+                    if prometheus_jobs:
+                        db_nsr_update[
+                            "_admin.deployed.prometheus_jobs"
+                        ] = prometheus_jobs
+                        self.update_db_2(
+                            "nsrs",
+                            nsr_id,
+                            db_nsr_update,
+                        )
+
+                        for job in prometheus_jobs:
+                            self.db.set_one(
+                                "prometheus_jobs",
+                                {"job_name": ""},
+                                job,
+                                upsert=True,
+                                fail_on_empty=False,
+                            )

             db_nsr_update[
                 "detailed-status"
             ] = ""  # "scaled {} {}".format(scaling_group, scaling_type)
@@ -7237,6 +7356,12 @@ class NsLcm(LcmBase):
                 error_description_nslcmop = None
                 nslcmop_operation_state = "COMPLETED"
                 db_nslcmop_update["detailed-status"] = "Done"
+                if scaling_type == "SCALE_IN" and prom_job_name is not None:
+                    self.db.del_one(
+                        "prometheus_jobs",
+                        {"job_name": prom_job_name},
+                        fail_on_empty=False,
+                    )

             self._write_op_status(
                 op_id=nslcmop_id,
@@ -7814,6 +7939,7 @@ class NsLcm(LcmBase):
             old_config_status = db_nsr["config-status"]

             db_nsr_update = {
+                "operational-status": "healing",
                 "_admin.deployed.RO.operational-status": "healing",
             }
             self.update_db_2("nsrs", nsr_id, db_nsr_update)
@@ -7959,7 +8085,6 @@ class NsLcm(LcmBase):
                 task_instantiation_info=tasks_dict_info,
                 stage=stage,
             )
-
         except (
             ROclient.ROClientException,
             DbException,
@@ -7981,6 +8106,15 @@ class NsLcm(LcmBase):
             )
         finally:
             error_list = list()
+            if db_vnfrs_list and target_list:
+                for vnfrs in db_vnfrs_list:
+                    for vnf_instance in target_list:
+                        if vnfrs["_id"] == vnf_instance.get("vnfInstanceId"):
+                            self.db.set_list(
+                                "vnfrs",
+                                {"_id": vnfrs["_id"]},
+                                {"_admin.modified": time()},
+                            )
             if exc:
                 error_list.append(str(exc))
             try:
@@ -8206,25 +8340,12 @@ class NsLcm(LcmBase):
                 )
             )
             ee_descriptor_id = ee_item.get("id")
-            if ee_item.get("juju"):
-                vca_name = ee_item["juju"].get("charm")
-                if get_charm_name:
-                    charm_name = self.find_charm_name(db_nsr, str(vca_name))
-                vca_type = (
-                    "lxc_proxy_charm"
-                    if ee_item["juju"].get("charm") is not None
-                    else "native_charm"
-                )
-                if ee_item["juju"].get("cloud") == "k8s":
-                    vca_type = "k8s_proxy_charm"
-                elif ee_item["juju"].get("proxy") is False:
-                    vca_type = "native_charm"
-            elif ee_item.get("helm-chart"):
-                vca_name = ee_item["helm-chart"]
-                vca_type = "helm-v3"
-            else:
+            vca_name, charm_name, vca_type = self.get_vca_info(
+                ee_item, db_nsr, get_charm_name
+            )
+            if not vca_type:
                 self.logger.debug(
-                    logging_text + "skipping non juju neither charm configuration"
+                    logging_text + "skipping, non juju/charm/helm configuration"
                 )
                 continue
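
The scaling hunks above keep the Prometheus scrape-job records aligned with VDU scale operations: on SCALE_OUT the jobs returned by extract_prometheus_scrape_jobs() are upserted into the prometheus_jobs collection, while on SCALE_IN the record whose job name was derived from the VNFR id, the VDU name and the count-index (with "_" and "-" stripped) is deleted. A minimal sketch of that name derivation only; the helper name build_prom_job_name and the sample identifiers are invented for this note:

    def build_prom_job_name(vnfr_id: str, vdu_name: str, count_index: int) -> str:
        # Concatenate the VNFR id, VDU name and count-index and strip the
        # characters the patch removes, so the result can be used as a
        # stable job_name key in the prometheus_jobs collection.
        name = vnfr_id + vdu_name + str(count_index)
        return name.replace("_", "").replace("-", "")

    # Hypothetical values; real ones come from the vnfrs documents.
    print(build_prom_job_name("0d9f-41a6", "mgmt_vdu", 1))  # 0d9f41a6mgmtvdu1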