X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_lcm%2Fns.py;h=19be20839734a9f7469876c1b5698c428bee3052;hb=refs%2Ftags%2Fv11.0.4;hp=c35311a376809628eae9da6be964f941215a50fc;hpb=c480cae45157fcc270fd4a50af42e47b10cc2f4c;p=osm%2FLCM.git

diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py
index c35311a..19be208 100644
--- a/osm_lcm/ns.py
+++ b/osm_lcm/ns.py
@@ -348,43 +348,49 @@ class NsLcm(LcmBase):
             self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
 
     async def _on_update_k8s_db(
-        self, cluster_uuid, kdu_instance, filter=None, vca_id=None
+        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
     ):
         """
         Updating vca status in NSR record
         :param cluster_uuid: UUID of a k8s cluster
         :param kdu_instance: The unique name of the KDU instance
         :param filter: To get nsr_id
+        :cluster_type: The cluster type (juju, k8s)
         :return: none
         """
 
         # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
         #                   .format(cluster_uuid, kdu_instance, filter))
 
+        nsr_id = filter.get("_id")
         try:
-            nsr_id = filter.get("_id")
-
-            # get vca status for NS
-            vca_status = await self.k8sclusterjuju.status_kdu(
-                cluster_uuid,
-                kdu_instance,
-                complete_status=True,
+            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
+                cluster_uuid=cluster_uuid,
+                kdu_instance=kdu_instance,
                 yaml_format=False,
+                complete_status=True,
                 vca_id=vca_id,
             )
+
             # vcaStatus
             db_dict = dict()
             db_dict["vcaStatus"] = {nsr_id: vca_status}
 
-            await self.k8sclusterjuju.update_vca_status(
-                db_dict["vcaStatus"],
-                kdu_instance,
-                vca_id=vca_id,
+            if cluster_type in ("juju-bundle", "juju"):
+                # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
+                # status in a similar way between Juju Bundles and Helm Charts on this side
+                await self.k8sclusterjuju.update_vca_status(
+                    db_dict["vcaStatus"],
+                    kdu_instance,
+                    vca_id=vca_id,
+                )
+
+            self.logger.debug(
+                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
             )
 
             # write to database
             self.update_db_2("nsrs", nsr_id, db_dict)
-
         except (asyncio.CancelledError, asyncio.TimeoutError):
             raise
         except Exception as e:
@@ -532,7 +538,7 @@ class NsLcm(LcmBase):
                 )
                 if not vdur:
                     # Read the template saved in the db:
-                    self.logger.debug(f"No vdur in the database. Using the vdur-template to scale")
+                    self.logger.debug("No vdur in the database. Using the vdur-template to scale")
                     vdur_template = db_vnfr.get("vdur-template")
                     if not vdur_template:
                         raise LcmException(
@@ -541,12 +547,13 @@ class NsLcm(LcmBase):
                             )
                         )
                     vdur = vdur_template[0]
-                    #Delete a template from the database after using it
-                    self.db.set_one("vnfrs",
-                        {"_id": db_vnfr["_id"]},
-                        None,
-                        pull={"vdur-template": {"_id": vdur['_id']}}
-                    )
+                    # Delete a template from the database after using it
+                    self.db.set_one(
+                        "vnfrs",
+                        {"_id": db_vnfr["_id"]},
+                        None,
+                        pull={"vdur-template": {"_id": vdur['_id']}}
+                    )
                 for count in range(vdu_count):
                     vdur_copy = deepcopy(vdur)
                     vdur_copy["status"] = "BUILD"
@@ -580,7 +587,7 @@ class NsLcm(LcmBase):
         if vdu_delete:
             if len(db_vnfr["vdur"]) == 1:
                 # The scale will move to 0 instances
-                self.logger.debug(f"Scaling to 0 !, creating the template with the last vdur")
+                self.logger.debug("Scaling to 0 !, creating the template with the last vdur")
                 template_vdur = [db_vnfr["vdur"][0]]
             for vdu_id, vdu_count in vdu_delete.items():
                 if mark_delete:
@@ -865,18 +872,55 @@ class NsLcm(LcmBase):
                         get_iterable(vdur, "interfaces"),
                         lambda iface: iface.get("ns-vld-id") == a_vld["name"],
                     )
+
+                    vld_params = find_in_list(
+                        get_iterable(ns_params, "vld"),
+                        lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
+                    )
                     if target_vld:
+
                         if vnf_params.get("vimAccountId") not in a_vld.get(
                             "vim_info", {}
                         ):
+                            target_vim_network_list = [
+                                v for _, v in a_vld.get("vim_info").items()
+                            ]
+                            target_vim_network_name = next(
+                                (
+                                    item.get("vim_network_name", "")
+                                    for item in target_vim_network_list
+                                ),
+                                "",
+                            )
+
                             target["ns"]["vld"][a_index].get("vim_info").update(
                                 {
                                     "vim:{}".format(vnf_params["vimAccountId"]): {
-                                        "vim_network_name": ""
+                                        "vim_network_name": target_vim_network_name,
                                     }
                                 }
                             )
+
+                            if vld_params:
+                                for param in ("vim-network-name", "vim-network-id"):
+                                    if vld_params.get(param) and isinstance(
+                                        vld_params[param], dict
+                                    ):
+                                        for vim, vim_net in vld_params[
+                                            param
+                                        ].items():
+                                            other_target_vim = "vim:" + vim
+                                            populate_dict(
+                                                target["ns"]["vld"][a_index].get(
+                                                    "vim_info"
+                                                ),
+                                                (
+                                                    other_target_vim,
+                                                    param.replace("-", "_"),
+                                                ),
+                                                vim_net,
+                                            )
+
         nslcmop_id = db_nslcmop["_id"]
         target = {
             "name": db_nsr["name"],
@@ -1968,6 +2012,9 @@ class NsLcm(LcmBase):
             step = "Waiting to VM being up and getting IP address"
             self.logger.debug(logging_text + step)
 
+            # default rw_mgmt_ip to None, avoiding the non definition of the variable
+            rw_mgmt_ip = None
+
             # n2vc_redesign STEP 5.1
             # wait for RO (ip-address) Insert pub_key into VM
             if vnfr_id:
@@ -1975,7 +2022,13 @@ class NsLcm(LcmBase):
                     rw_mgmt_ip = await self.wait_kdu_up(
                         logging_text, nsr_id, vnfr_id, kdu_name
                     )
-                else:
+
+                # This verification is needed in order to avoid trying to add a public key
+                # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
+                # for a KNF and not for its KDUs, the previous verification gives False, and the code
+                # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
+                # or it is a KNF)
+                elif db_vnfr.get('vdur'):
                     rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                         logging_text,
                         nsr_id,
@@ -1985,8 +2038,6 @@ class NsLcm(LcmBase):
                         user=user,
                         pub_key=pub_key,
                     )
-            else:
-                rw_mgmt_ip = None  # This is for a NS configuration
 
             self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
 
@@ -2077,7 +2128,7 @@ class NsLcm(LcmBase):
                        },
                        job,
                        upsert=True,
-                       fail_on_empty=False
+                       fail_on_empty=False,
                    )
 
            step = "instantiated at VCA"
@@ -3151,9 +3202,45 @@ class NsLcm(LcmBase):
                 kdu_model=k8s_instance_info["kdu-model"],
                 kdu_name=k8s_instance_info["kdu-name"],
             )
+
+            # Update the nsrs table with the kdu-instance value
             self.update_db_2(
-                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
+                item="nsrs",
+                _id=nsr_id,
+                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
             )
+
+            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
+            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
+            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
+            # namespace, this first verification could be removed, and the next step would be done for any kind
+            # of KNF.
+            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
+            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
+            if k8sclustertype in ("juju", "juju-bundle"):
+                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
+                # that the user passed a namespace which he wants its KDU to be deployed in)
+                if (
+                    self.db.count(
+                        table="nsrs",
+                        q_filter={
+                            "_id": nsr_id,
+                            "_admin.projects_write": k8s_instance_info["namespace"],
+                            "_admin.projects_read": k8s_instance_info["namespace"],
+                        },
+                    )
+                    > 0
+                ):
+                    self.logger.debug(
+                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
+                    )
+                    self.update_db_2(
+                        item="nsrs",
+                        _id=nsr_id,
+                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
+                    )
+                    k8s_instance_info["namespace"] = kdu_instance
+
             await self.k8scluster_map[k8sclustertype].install(
                 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                 kdu_model=k8s_instance_info["kdu-model"],
@@ -3166,9 +3253,6 @@ class NsLcm(LcmBase):
                 kdu_instance=kdu_instance,
                 vca_id=vca_id,
             )
-            self.update_db_2(
-                "nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}
-            )
 
             # Obtain services to obtain management service ip
             services = await self.k8scluster_map[k8sclustertype].get_services(
@@ -3525,7 +3609,7 @@ class NsLcm(LcmBase):
                         vnfd_with_id,
                         k8s_instance_info,
                         k8params=desc_params,
-                        timeout=600,
+                        timeout=1800,
                         vca_id=vca_id,
                     )
                 )
@@ -4862,10 +4946,18 @@ class NsLcm(LcmBase):
         db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
         vca_id = self.get_vca_id({}, db_nsr)
         if db_nsr["_admin"]["deployed"]["K8s"]:
-            for k8s_index, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
-                cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
+            for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
+                cluster_uuid, kdu_instance, cluster_type = (
+                    k8s["k8scluster-uuid"],
+                    k8s["kdu-instance"],
+                    k8s["k8scluster-type"],
+                )
                 await self._on_update_k8s_db(
-                    cluster_uuid, kdu_instance, filter={"_id": nsr_id}, vca_id=vca_id
+                    cluster_uuid=cluster_uuid,
+                    kdu_instance=kdu_instance,
+                    filter={"_id": nsr_id},
+                    vca_id=vca_id,
+                    cluster_type=cluster_type,
                 )
         else:
             for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
@@ -6439,13 +6531,7 @@ class NsLcm(LcmBase):
         )
 
     async def extract_prometheus_scrape_jobs(
-        self,
-        ee_id,
-        artifact_path,
-        ee_config_descriptor,
-        vnfr_id,
-        nsr_id,
-        target_ip
+        self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
     ):
         # look if exist a file called 'prometheus*.j2' and
         artifact_content = self.fs.dir_ls(artifact_path)
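The central change in this patch is that _on_update_k8s_db() stops hardcoding the Juju connector (self.k8sclusterjuju) and instead looks the connector up in self.k8scluster_map using the new cluster_type argument, which the caller now reads from each K8s deployment record (k8s["k8scluster-type"]). The following is a minimal, self-contained sketch of that dispatch pattern; the connector classes, map keys, and status payloads are illustrative stand-ins, not the real N2VC connectors or their full status_kdu() signatures.

# dispatch_sketch.py - illustrative only; the connector classes below are
# placeholders, not the N2VC K8sHelmConnector / K8sJujuConnector API.
import asyncio


class HelmConnectorStub:
    async def status_kdu(self, cluster_uuid, kdu_instance, **kwargs):
        # A real connector would query Helm for the release status.
        return {"status": "deployed", "kdu_instance": kdu_instance}


class JujuConnectorStub:
    async def status_kdu(self, cluster_uuid, kdu_instance, **kwargs):
        # A real connector would query the Juju model status.
        return {"status": "active", "kdu_instance": kdu_instance}


class KduStatusUpdater:
    def __init__(self):
        helm = HelmConnectorStub()
        juju = JujuConnectorStub()
        # Same idea as NsLcm.k8scluster_map: one connector per k8scluster-type,
        # with the Juju keys ("juju", "juju-bundle") next to the Helm ones.
        self.k8scluster_map = {
            "helm-chart": helm,
            "helm-chart-v3": helm,
            "juju-bundle": juju,
            "juju": juju,
        }

    async def on_update_k8s_db(self, cluster_uuid, kdu_instance, cluster_type="juju"):
        # Dispatch on cluster_type instead of always using the Juju connector.
        vca_status = await self.k8scluster_map[cluster_type].status_kdu(
            cluster_uuid=cluster_uuid, kdu_instance=kdu_instance
        )
        if cluster_type in ("juju-bundle", "juju"):
            # Juju-only post-processing (update_vca_status in the patch) would go here.
            pass
        return vca_status


if __name__ == "__main__":
    updater = KduStatusUpdater()
    status = asyncio.run(
        updater.on_update_k8s_db("cluster-1", "kdu-instance-1", cluster_type="helm-chart-v3")
    )
    print(status)

This map-based lookup is also what lets the caller in hunk @@ -4862,10 +4946,18 @@ pass cluster_type straight from k8s["k8scluster-type"] without branching on the connector itself.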