X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_lcm%2Fns.py;h=9e2ac430bcdcac25f6ccb13eaf2ca81e5f212331;hb=HEAD;hp=2fecb5db4030aa5e18c2127197fddef2a5f44244;hpb=0cd8af3ed256f15931ddc8501c061888fb809050;p=osm%2FLCM.git

diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py
index 2fecb5d..1714985 100644
--- a/osm_lcm/ns.py
+++ b/osm_lcm/ns.py
@@ -265,14 +265,23 @@ class NsLcm(LcmBase):
         nsr = self.db.get_one(table="nsrs", q_filter=filter)
         current_ns_status = nsr.get("nsState")
 
-        # get vca status for NS
+        # First, we need to verify if the current vcaStatus is null, because if that is the case,
+        # MongoDB will not be able to create the fields used within the update key in the database
+        if not nsr.get("vcaStatus"):
+            # Write an empty dictionary to the vcaStatus field if its value is null
+            self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})
+
+        # Get vca status for NS
         status_dict = await self.n2vc.get_status(
             namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
         )
 
-        # vcaStatus
+        # Update the vcaStatus
+        db_key = f"vcaStatus.{nsr_id}.VNF"
         db_dict = dict()
-        db_dict["vcaStatus"] = status_dict
+
+        db_dict[db_key] = status_dict[nsr_id]
+        await self.n2vc.update_vca_status(db_dict[db_key], vca_id=vca_id)
 
         # update configurationStatus for this VCA
         try:
@@ -376,9 +385,27 @@ class NsLcm(LcmBase):
                 vca_id=vca_id,
             )
 
-            # vcaStatus
+            # First, we need to verify if the current vcaStatus is null, because if that is the case,
+            # MongoDB will not be able to create the fields used within the update key in the database
+            nsr = self.db.get_one(table="nsrs", q_filter=filter)
+            if not nsr.get("vcaStatus"):
+                # Write an empty dictionary to the vcaStatus field if its value is null
+                self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})
+
+            # Update the vcaStatus
+            db_key = f"vcaStatus.{nsr_id}.KNF"
             db_dict = dict()
-            db_dict["vcaStatus"] = {nsr_id: vca_status}
+
+            db_dict[db_key] = vca_status
+
+            if cluster_type in ("juju-bundle", "juju"):
+                # TODO -> this should be done in a more uniform way, ideally in N2VC, in order to update
+                # the K8s VCA status in a similar way between Juju Bundles and Helm Charts on this side
+                await self.k8sclusterjuju.update_vca_status(
+                    db_dict[db_key],
+                    kdu_instance,
+                    vca_id=vca_id,
+                )
 
             self.logger.debug(
                 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
             )
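These two hunks exist because MongoDB rejects a dotted $set such as vcaStatus.<nsr_id>.VNF when the parent field holds null: subfields cannot be created inside a null element, so the patch first overwrites a null vcaStatus with an empty dictionary. A minimal sketch of the failure mode and the workaround, talking to MongoDB directly through pymongo; the database/collection names and the nsr_id value are illustrative, and LCM itself goes through its update_db_2() wrapper rather than pymongo calls like these:

# Minimal sketch, not LCM code: a null vcaStatus must be replaced with {}
# before nested fields can be created under it with a dotted update key.
from pymongo import MongoClient

nsrs = MongoClient().osm.nsrs      # hypothetical database/collection
nsr_id = "example-nsr-id"          # hypothetical id
nsrs.insert_one({"_id": nsr_id, "vcaStatus": None})

# This raises WriteError ("Cannot create field ... in element {vcaStatus: null}"):
#   nsrs.update_one({"_id": nsr_id}, {"$set": {f"vcaStatus.{nsr_id}.VNF": {}}})

# The patch's workaround: reset the null field to an empty document first
if not nsrs.find_one({"_id": nsr_id}).get("vcaStatus"):
    nsrs.update_one({"_id": nsr_id}, {"$set": {"vcaStatus": {}}})

# Now the dotted $set succeeds and creates the nested structure
nsrs.update_one(
    {"_id": nsr_id}, {"$set": {f"vcaStatus.{nsr_id}.VNF": {"status": "running"}}}
)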
@@ -3964,25 +3991,12 @@ class NsLcm(LcmBase):
                     )
                 )
                 ee_descriptor_id = ee_item.get("id")
-                if ee_item.get("juju"):
-                    vca_name = ee_item["juju"].get("charm")
-                    if get_charm_name:
-                        charm_name = self.find_charm_name(db_nsr, str(vca_name))
-                    vca_type = (
-                        "lxc_proxy_charm"
-                        if ee_item["juju"].get("charm") is not None
-                        else "native_charm"
-                    )
-                    if ee_item["juju"].get("cloud") == "k8s":
-                        vca_type = "k8s_proxy_charm"
-                    elif ee_item["juju"].get("proxy") is False:
-                        vca_type = "native_charm"
-                elif ee_item.get("helm-chart"):
-                    vca_name = ee_item["helm-chart"]
-                    vca_type = "helm-v3"
-                else:
+                vca_name, charm_name, vca_type = self.get_vca_info(
+                    ee_item, db_nsr, get_charm_name
+                )
+                if not vca_type:
                     self.logger.debug(
-                        logging_text + "skipping non juju neither charm configuration"
+                        logging_text + "skipping, non juju/charm/helm configuration"
                     )
                     continue
@@ -4535,25 +4549,25 @@ class NsLcm(LcmBase):
             if nsr_deployed.get("VCA"):
                 stage[1] = "Deleting all execution environments."
                 self.logger.debug(logging_text + stage[1])
-                vca_id = self.get_vca_id({}, db_nsr)
-                task_delete_ee = asyncio.ensure_future(
-                    asyncio.wait_for(
-                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
-                        timeout=self.timeout.charm_delete,
+                helm_vca_list = get_deployed_vca(db_nsr, {"type": "helm-v3"})
+                if helm_vca_list:
+                    # Delete Namespace and Certificates
+                    await self.vca_map["helm-v3"].delete_tls_certificate(
+                        namespace=db_nslcmop["nsInstanceId"],
+                        certificate_name=self.EE_TLS_NAME,
                     )
-                )
-                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
-                tasks_dict_info[task_delete_ee] = "Terminating all VCA"
-
-                # Delete Namespace and Certificates if necessary
-                if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
-                    await self.vca_map["helm-v3"].delete_tls_certificate(
-                        namespace=db_nslcmop["nsInstanceId"],
-                        certificate_name=self.EE_TLS_NAME,
-                    )
-                    await self.vca_map["helm-v3"].delete_namespace(
-                        namespace=db_nslcmop["nsInstanceId"],
-                    )
+                    await self.vca_map["helm-v3"].delete_namespace(
+                        namespace=db_nslcmop["nsInstanceId"],
+                    )
+                else:
+                    vca_id = self.get_vca_id({}, db_nsr)
+                    task_delete_ee = asyncio.ensure_future(
+                        asyncio.wait_for(
+                            self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
+                            timeout=self.timeout.charm_delete,
+                        )
+                    )
+                    tasks_dict_info[task_delete_ee] = "Terminating all VCA"
 
             # Delete from k8scluster
             stage[1] = "Deleting KDUs."
@@ -5019,7 +5033,7 @@ class NsLcm(LcmBase):
                     vca_id=vca_id,
                     cluster_type=cluster_type,
                 )
-        else:
+        if db_nsr["_admin"]["deployed"]["VCA"]:
             for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
                 table, filter = "nsrs", {"_id": nsr_id}
                 path = "_admin.deployed.VCA.{}.".format(vca_index)
@@ -6346,7 +6360,11 @@ class NsLcm(LcmBase):
                 nsr_id,
                 {
                     "_admin.scaling-group": [
-                        {"name": scaling_group, "nb-scale-op": 0}
+                        {
+                            "name": scaling_group,
+                            "vnf_index": vnf_index,
+                            "nb-scale-op": 0,
+                        }
                     ]
                 },
             )
@@ -6355,7 +6373,10 @@ class NsLcm(LcmBase):
             for admin_scale_index, admin_scale_info in enumerate(
                 db_nsr["_admin"]["scaling-group"]
             ):
-                if admin_scale_info["name"] == scaling_group:
+                if (
+                    admin_scale_info["name"] == scaling_group
+                    and admin_scale_info["vnf_index"] == vnf_index
+                ):
                     nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
                     break
             else:  # not found, set index one plus last element and add new entry with the name
@@ -6363,6 +6384,9 @@ class NsLcm(LcmBase):
                 db_nsr_update[
                     "_admin.scaling-group.{}.name".format(admin_scale_index)
                 ] = scaling_group
+                db_nsr_update[
+                    "_admin.scaling-group.{}.vnf_index".format(admin_scale_index)
+                ] = vnf_index
 
         vca_scaling_info = []
         scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
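The three scaling-group hunks above change an entry's identity in _admin.scaling-group from the group name alone to the (name, vnf_index) pair, so two VNFs that happen to use the same scaling-group name no longer share one nb-scale-op counter. A small self-contained sketch of the patched lookup rule (the data values are illustrative, not from the patch):

# Sketch of the patched matching rule: scaling-group entries are identified
# by both name and vnf_index, giving each VNF its own scale-operation counter.
scaling_groups = [
    {"name": "scale-by-one", "vnf_index": "1", "nb-scale-op": 2},
    {"name": "scale-by-one", "vnf_index": "2", "nb-scale-op": 0},
]


def find_nb_scale_op(groups, scaling_group, vnf_index):
    for admin_scale_info in groups:
        if (
            admin_scale_info["name"] == scaling_group
            and admin_scale_info["vnf_index"] == vnf_index
        ):
            return admin_scale_info.get("nb-scale-op", 0)
    return None  # not found: the patch then appends a new entry


assert find_nb_scale_op(scaling_groups, "scale-by-one", "1") == 2
assert find_nb_scale_op(scaling_groups, "scale-by-one", "2") == 0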
ee_item.get("juju"): - vca_name = ee_item["juju"].get("charm") - if get_charm_name: - charm_name = self.find_charm_name(db_nsr, str(vca_name)) - vca_type = ( - "lxc_proxy_charm" - if ee_item["juju"].get("charm") is not None - else "native_charm" - ) - if ee_item["juju"].get("cloud") == "k8s": - vca_type = "k8s_proxy_charm" - elif ee_item["juju"].get("proxy") is False: - vca_type = "native_charm" - elif ee_item.get("helm-chart"): - vca_name = ee_item["helm-chart"] - vca_type = "helm-v3" - else: + vca_name, charm_name, vca_type = self.get_vca_info( + ee_item, db_nsr, get_charm_name + ) + if not vca_type: self.logger.debug( - logging_text + "skipping non juju neither charm configuration" + logging_text + "skipping, non juju/charm/helm configuration" ) continue