X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_lcm%2Fns.py;h=63a9b6721b9d64a814b85f6a88590be25bb2bf20;hb=0ceae9a95032d5d101c3eb19354733ab40c53200;hp=ced0ec08b8ea2eca559c85e930d3110bcb657ca3;hpb=a27dc53c6acd967ea17f0d720a82b23a8404cbfa;p=osm%2FLCM.git diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index ced0ec0..63a9b67 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -61,6 +61,7 @@ from osm_lcm.lcm_utils import ( check_juju_bundle_existence, get_charm_artifact_path, get_ee_id_parts, + vld_to_ro_ip_profile, ) from osm_lcm.data_utils.nsd import ( get_ns_configuration_relation_list, @@ -245,7 +246,6 @@ class NsLcm(LcmBase): return None def _on_update_ro_db(self, nsrs_id, ro_descriptor): - # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id)) try: @@ -263,7 +263,6 @@ class NsLcm(LcmBase): ) async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None): - # remove last dot from path (if exists) if path.endswith("."): path = path[:-1] @@ -271,7 +270,6 @@ class NsLcm(LcmBase): # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}' # .format(table, filter, path, updated_data)) try: - nsr_id = filter.get("_id") # read ns record from database @@ -532,7 +530,6 @@ class NsLcm(LcmBase): return wim_account def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False): - db_vdu_push_list = [] template_vdur = [] db_update = {"_admin.modified": time()} @@ -820,7 +817,6 @@ class NsLcm(LcmBase): start_deploy, timeout_ns_deploy, ): - db_vims = {} def get_vim_account(vim_account_id): @@ -836,9 +832,9 @@ class NsLcm(LcmBase): target_vim, target_vld, vld_params, target_sdn ): if vld_params.get("ip-profile"): - target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[ - "ip-profile" - ] + target_vld["vim_info"][target_vim]["ip_profile"] = vld_to_ro_ip_profile( + vld_params["ip-profile"] + ) if vld_params.get("provider-network"): target_vld["vim_info"][target_vim]["provider_network"] = vld_params[ "provider-network" @@ -915,7 +911,6 @@ class NsLcm(LcmBase): lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]), ) if target_vld: - if vnf_params.get("vimAccountId") not in a_vld.get( "vim_info", {} ): @@ -1014,16 +1009,16 @@ class NsLcm(LcmBase): # check if this network needs SDN assist if vld.get("pci-interfaces"): db_vim = get_vim_account(ns_params["vimAccountId"]) - sdnc_id = db_vim["config"].get("sdn-controller") - if sdnc_id: - sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"]) - target_sdn = "sdn:{}".format(sdnc_id) - target_vld["vim_info"][target_sdn] = { - "sdn": True, - "target_vim": target_vim, - "vlds": [sdn_vld], - "type": vld.get("type"), - } + if vim_config := db_vim.get("config"): + if sdnc_id := vim_config.get("sdn-controller"): + sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"]) + target_sdn = "sdn:{}".format(sdnc_id) + target_vld["vim_info"][target_sdn] = { + "sdn": True, + "target_vim": target_vim, + "vlds": [sdn_vld], + "type": vld.get("type"), + } nsd_vnf_profiles = get_vnf_profiles(nsd) for nsd_vnf_profile in nsd_vnf_profiles: @@ -1050,27 +1045,9 @@ class NsLcm(LcmBase): and nsd_vlp.get("virtual-link-protocol-data") and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data") ): - ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][ + vld_params["ip-profile"] = nsd_vlp["virtual-link-protocol-data"][ "l3-protocol-data" ] - ip_profile_dest_data = {} - if "ip-version" in ip_profile_source_data: - ip_profile_dest_data["ip-version"] = ip_profile_source_data[ - 
"ip-version" - ] - if "cidr" in ip_profile_source_data: - ip_profile_dest_data["subnet-address"] = ip_profile_source_data[ - "cidr" - ] - if "gateway-ip" in ip_profile_source_data: - ip_profile_dest_data["gateway-address"] = ip_profile_source_data[ - "gateway-ip" - ] - if "dhcp-enabled" in ip_profile_source_data: - ip_profile_dest_data["dhcp-params"] = { - "enabled": ip_profile_source_data["dhcp-enabled"] - } - vld_params["ip-profile"] = ip_profile_dest_data # update vld_params with instantiation params vld_instantiation_params = find_in_list( @@ -1136,28 +1113,9 @@ class NsLcm(LcmBase): and vnfd_vlp.get("virtual-link-protocol-data") and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data") ): - ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][ + vld_params["ip-profile"] = vnfd_vlp["virtual-link-protocol-data"][ "l3-protocol-data" ] - ip_profile_dest_data = {} - if "ip-version" in ip_profile_source_data: - ip_profile_dest_data["ip-version"] = ip_profile_source_data[ - "ip-version" - ] - if "cidr" in ip_profile_source_data: - ip_profile_dest_data["subnet-address"] = ip_profile_source_data[ - "cidr" - ] - if "gateway-ip" in ip_profile_source_data: - ip_profile_dest_data[ - "gateway-address" - ] = ip_profile_source_data["gateway-ip"] - if "dhcp-enabled" in ip_profile_source_data: - ip_profile_dest_data["dhcp-params"] = { - "enabled": ip_profile_source_data["dhcp-enabled"] - } - - vld_params["ip-profile"] = ip_profile_dest_data # update vld_params with instantiation params if vnf_params: vld_instantiation_params = find_in_list( @@ -1378,7 +1336,6 @@ class NsLcm(LcmBase): } desc = await self.RO.deploy(nsr_id, target) action_id = desc["action_id"] - db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING" self.logger.debug( logging_text @@ -1396,20 +1353,17 @@ class NsLcm(LcmBase): stage, operation="termination", ) - - db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED" # delete all nsr await self.RO.delete(nsr_id) - except Exception as e: - if isinstance(e, NgRoException) and e.http_code == 404: # not found + except NgRoException as e: + if e.http_code == 404: # not found db_nsr_update["_admin.deployed.RO.nsr_id"] = None db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED" - db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None self.logger.debug( logging_text + "RO_action_id={} already deleted".format(action_id) ) - elif isinstance(e, NgRoException) and e.http_code == 409: # conflict + elif e.http_code == 409: # conflict failed_detail.append("delete conflict: {}".format(e)) self.logger.debug( logging_text @@ -1421,6 +1375,11 @@ class NsLcm(LcmBase): logging_text + "RO_action_id={} delete error: {}".format(action_id, e) ) + except Exception as e: + failed_detail.append("delete error: {}".format(e)) + self.logger.error( + logging_text + "RO_action_id={} delete error: {}".format(action_id, e) + ) if failed_detail: stage[2] = "Error deleting from VIM" @@ -1562,14 +1521,11 @@ class NsLcm(LcmBase): """ self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro") - ro_nsr_id = None ip_address = None - nb_tries = 0 target_vdu_id = None ro_retries = 0 while True: - ro_retries += 1 if ro_retries >= 360: # 1 hour raise LcmException( @@ -1652,74 +1608,24 @@ class NsLcm(LcmBase): self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU") return ip_address try: - ro_vm_id = "{}-{}".format( - 
db_vnfr["member-vnf-index-ref"], target_vdu_id - ) # TODO add vdu_index - if self.ro_config.ng: - target = { - "action": { - "action": "inject_ssh_key", - "key": pub_key, - "user": user, - }, - "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}], - } - desc = await self.RO.deploy(nsr_id, target) - action_id = desc["action_id"] - await self._wait_ng_ro( - nsr_id, action_id, timeout=600, operation="instantiation" - ) - break - else: - # wait until NS is deployed at RO - if not ro_nsr_id: - db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id}) - ro_nsr_id = deep_get( - db_nsrs, ("_admin", "deployed", "RO", "nsr_id") - ) - if not ro_nsr_id: - continue - result_dict = await self.RO.create_action( - item="ns", - item_id_name=ro_nsr_id, - descriptor={ - "add_public_key": pub_key, - "vms": [ro_vm_id], - "user": user, - }, - ) - # result_dict contains the format {VM-id: {vim_result: 200, description: text}} - if not result_dict or not isinstance(result_dict, dict): - raise LcmException( - "Unknown response from RO when injecting key" - ) - for result in result_dict.values(): - if result.get("vim_result") == 200: - break - else: - raise ROclient.ROClientException( - "error injecting key: {}".format( - result.get("description") - ) - ) - break + target = { + "action": { + "action": "inject_ssh_key", + "key": pub_key, + "user": user, + }, + "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}], + } + desc = await self.RO.deploy(nsr_id, target) + action_id = desc["action_id"] + await self._wait_ng_ro( + nsr_id, action_id, timeout=600, operation="instantiation" + ) + break except NgRoException as e: raise LcmException( "Reaching max tries injecting key. Error: {}".format(e) ) - except ROclient.ROClientException as e: - if not nb_tries: - self.logger.debug( - logging_text - + "error injecting key: {}. Retrying until {} seconds".format( - e, 20 * 10 - ) - ) - nb_tries += 1 - if nb_tries >= 20: - raise LcmException( - "Reaching max tries injecting key. 
Error: {}".format(e) - ) else: break @@ -1782,6 +1688,7 @@ class NsLcm(LcmBase): vdu_id, kdu_name, vdu_index, + kdu_index, config_descriptor, deploy_params, base_folder, @@ -1803,7 +1710,6 @@ class NsLcm(LcmBase): } step = "" try: - element_type = "NS" element_under_configuration = nsr_id @@ -1887,7 +1793,6 @@ class NsLcm(LcmBase): vca_id = self.get_vca_id(db_vnfr, db_nsr) # create or register execution environment in VCA if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"): - self._write_configuration_status( nsr_id=nsr_id, vca_index=vca_index, @@ -2043,13 +1948,16 @@ class NsLcm(LcmBase): ) # add relations for this VCA (wait for other peers related with this VCA) - await self._add_vca_relations( + is_relation_added = await self._add_vca_relations( logging_text=logging_text, nsr_id=nsr_id, vca_type=vca_type, vca_index=vca_index, ) + if not is_relation_added: + raise LcmException("Relations could not be added to VCA.") + # if SSH access is required, then get execution environment SSH public # if native charm we have waited already to VM be UP if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"): @@ -2204,6 +2112,12 @@ class NsLcm(LcmBase): vnfr_id=vnfr_id, nsr_id=nsr_id, target_ip=rw_mgmt_ip, + element_type=element_type, + vnf_member_index=db_vnfr.get("member-vnf-index-ref", ""), + vdu_id=vdu_id, + vdu_index=vdu_index, + kdu_name=kdu_name, + kdu_index=kdu_index, ) if prometheus_jobs: self.update_db_2( @@ -2239,7 +2153,7 @@ class NsLcm(LcmBase): self._write_configuration_status( nsr_id=nsr_id, vca_index=vca_index, status="BROKEN" ) - raise LcmException("{} {}".format(step, e)) from e + raise LcmException("{}. {}".format(step, e)) from e def _write_ns_status( self, @@ -2339,7 +2253,6 @@ class NsLcm(LcmBase): element_type: str = None, other_update: dict = None, ): - # self.logger.debug('_write_configuration_status(): vca_index={}, status={}' # .format(vca_index, status)) @@ -2452,6 +2365,8 @@ class NsLcm(LcmBase): # update operation on nslcmops db_nslcmop_update = {} + timeout_ns_deploy = self.timeout.ns_deploy + nslcmop_operation_state = None db_vnfrs = {} # vnf's info indexed by member-index # n2vc_info = {} @@ -2492,8 +2407,6 @@ class NsLcm(LcmBase): ns_params = db_nslcmop.get("operationParams") if ns_params and ns_params.get("timeout_ns_deploy"): timeout_ns_deploy = ns_params["timeout_ns_deploy"] - else: - timeout_ns_deploy = self.timeout.ns_deploy # read from db: ns stage[1] = "Getting nsr={} from db.".format(nsr_id) @@ -2640,6 +2553,7 @@ class NsLcm(LcmBase): vdu_index = 0 vdu_name = None kdu_name = None + kdu_index = None # Get additional parameters deploy_params = {"OSM": get_osm_params(db_vnfr)} @@ -2663,6 +2577,7 @@ class NsLcm(LcmBase): kdu_name=kdu_name, member_vnf_index=member_vnf_index, vdu_index=vdu_index, + kdu_index=kdu_index, vdu_name=vdu_name, deploy_params=deploy_params, descriptor_config=descriptor_config, @@ -2695,6 +2610,7 @@ class NsLcm(LcmBase): if descriptor_config: vdu_name = None kdu_name = None + kdu_index = None for vdu_index in range(vdud_count): # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"] self._deploy_n2vc( @@ -2710,6 +2626,7 @@ class NsLcm(LcmBase): vnfd_id=vnfd_id, vdu_id=vdu_id, kdu_name=kdu_name, + kdu_index=kdu_index, member_vnf_index=member_vnf_index, vdu_index=vdu_index, vdu_name=vdu_name, @@ -2726,8 +2643,10 @@ class NsLcm(LcmBase): vdu_id = None vdu_index = 0 vdu_name = None - kdur = next( - x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name + kdu_index, kdur = next( + x + for x in 
enumerate(db_vnfr["kdur"]) + if x[1]["kdu-name"] == kdu_name ) deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)} if kdur.get("additionalParams"): @@ -2747,6 +2666,7 @@ class NsLcm(LcmBase): kdu_name=kdu_name, member_vnf_index=member_vnf_index, vdu_index=vdu_index, + kdu_index=kdu_index, vdu_name=vdu_name, deploy_params=deploy_params_kdu, descriptor_config=descriptor_config, @@ -2763,6 +2683,7 @@ class NsLcm(LcmBase): member_vnf_index = None vdu_id = None kdu_name = None + kdu_index = None vdu_index = 0 vdu_name = None @@ -2785,6 +2706,7 @@ class NsLcm(LcmBase): kdu_name=kdu_name, member_vnf_index=member_vnf_index, vdu_index=vdu_index, + kdu_index=kdu_index, vdu_name=vdu_name, deploy_params=deploy_params, descriptor_config=descriptor_config, @@ -2915,9 +2837,11 @@ class NsLcm(LcmBase): self.logger.debug(logging_text + "Exit") self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate") - def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]): + def _get_vnfd(self, vnfd_id: str, projects_read: str, cached_vnfds: Dict[str, Any]): if vnfd_id not in cached_vnfds: - cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id}) + cached_vnfds[vnfd_id] = self.db.get_one( + "vnfds", {"id": vnfd_id, "_admin.projects_read": projects_read} + ) return cached_vnfds[vnfd_id] def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]): @@ -2959,7 +2883,8 @@ class NsLcm(LcmBase): ]: vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"]) vnfd_id = vnf_profile["vnfd-id"] - db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds) + project = nsd["_admin"]["projects_read"][0] + db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds) entity_id = ( vnfd_id if ee_relation_level == EELevel.VNF @@ -3029,10 +2954,14 @@ class NsLcm(LcmBase): cached_vnfds: Dict[str, Any], ) -> List[Relation]: relations = [] + if vca.target_element == "ns": + self.logger.debug("VCA is a NS charm, not a VNF.") + return relations vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id) vnf_profile_id = vnf_profile["id"] vnfd_id = vnf_profile["vnfd-id"] - db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds) + project = nsd["_admin"]["projects_read"][0] + db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds) db_vnf_relations = get_relation_list(db_vnfd, vnfd_id) for r in db_vnf_relations: provider_dict = None @@ -3087,7 +3016,8 @@ class NsLcm(LcmBase): vnf_profiles, lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id, )["vnfd-id"] - db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds) + project = nsd["_admin"]["projects_read"][0] + db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds) kdu_resource_profile = get_kdu_resource_profile( db_vnfd, ee_relation.kdu_resource_profile_id ) @@ -3193,11 +3123,14 @@ class NsLcm(LcmBase): requirer_vca_id, relation.requirer.endpoint, ) - await self.vca_map[vca_type].add_relation( - provider=provider_relation_endpoint, - requirer=requirer_relation_endpoint, - ) - # remove entry from relations list + try: + await self.vca_map[vca_type].add_relation( + provider=provider_relation_endpoint, + requirer=requirer_relation_endpoint, + ) + except N2VCException as exception: + self.logger.error(exception) + raise LcmException(exception) return True return False @@ -3209,7 +3142,6 @@ class NsLcm(LcmBase): vca_index: int, timeout: int = 3600, ) -> bool: - # steps: # 1. find all relations for this VCA # 2. 
wait for other peers related @@ -3287,7 +3219,6 @@ class NsLcm(LcmBase): timeout: int = 600, vca_id: str = None, ): - try: k8sclustertype = k8s_instance_info["k8scluster-type"] # Instantiate kdu @@ -3757,6 +3688,7 @@ class NsLcm(LcmBase): kdu_name, member_vnf_index, vdu_index, + kdu_index, vdu_name, deploy_params, descriptor_config, @@ -3883,6 +3815,7 @@ class NsLcm(LcmBase): vdu_id=vdu_id, kdu_name=kdu_name, vdu_index=vdu_index, + kdu_index=kdu_index, deploy_params=deploy_params, config_descriptor=descriptor_config, base_folder=base_folder, @@ -4261,206 +4194,6 @@ class NsLcm(LcmBase): pass self._write_all_config_status(db_nsr=db_nsr, status="DELETED") - async def _terminate_RO( - self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage - ): - """ - Terminates a deployment from RO - :param logging_text: - :param nsr_deployed: db_nsr._admin.deployed - :param nsr_id: - :param nslcmop_id: - :param stage: list of string with the content to write on db_nslcmop.detailed-status. - this method will update only the index 2, but it will write on database the concatenated content of the list - :return: - """ - db_nsr_update = {} - failed_detail = [] - ro_nsr_id = ro_delete_action = None - if nsr_deployed and nsr_deployed.get("RO"): - ro_nsr_id = nsr_deployed["RO"].get("nsr_id") - ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id") - try: - if ro_nsr_id: - stage[2] = "Deleting ns from VIM." - db_nsr_update["detailed-status"] = " ".join(stage) - self._write_op_status(nslcmop_id, stage) - self.logger.debug(logging_text + stage[2]) - self.update_db_2("nsrs", nsr_id, db_nsr_update) - self._write_op_status(nslcmop_id, stage) - desc = await self.RO.delete("ns", ro_nsr_id) - ro_delete_action = desc["action_id"] - db_nsr_update[ - "_admin.deployed.RO.nsr_delete_action_id" - ] = ro_delete_action - db_nsr_update["_admin.deployed.RO.nsr_id"] = None - db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED" - if ro_delete_action: - # wait until NS is deleted from VIM - stage[2] = "Waiting ns deleted from VIM." 
- detailed_status_old = None - self.logger.debug( - logging_text - + stage[2] - + " RO_id={} ro_delete_action={}".format( - ro_nsr_id, ro_delete_action - ) - ) - self.update_db_2("nsrs", nsr_id, db_nsr_update) - self._write_op_status(nslcmop_id, stage) - - delete_timeout = 20 * 60 # 20 minutes - while delete_timeout > 0: - desc = await self.RO.show( - "ns", - item_id_name=ro_nsr_id, - extra_item="action", - extra_item_id=ro_delete_action, - ) - - # deploymentStatus - self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc) - - ns_status, ns_status_info = self.RO.check_action_status(desc) - if ns_status == "ERROR": - raise ROclient.ROClientException(ns_status_info) - elif ns_status == "BUILD": - stage[2] = "Deleting from VIM {}".format(ns_status_info) - elif ns_status == "ACTIVE": - db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None - db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED" - break - else: - assert ( - False - ), "ROclient.check_action_status returns unknown {}".format( - ns_status - ) - if stage[2] != detailed_status_old: - detailed_status_old = stage[2] - db_nsr_update["detailed-status"] = " ".join(stage) - self._write_op_status(nslcmop_id, stage) - self.update_db_2("nsrs", nsr_id, db_nsr_update) - await asyncio.sleep(5, loop=self.loop) - delete_timeout -= 5 - else: # delete_timeout <= 0: - raise ROclient.ROClientException( - "Timeout waiting ns deleted from VIM" - ) - - except Exception as e: - self.update_db_2("nsrs", nsr_id, db_nsr_update) - if ( - isinstance(e, ROclient.ROClientException) and e.http_code == 404 - ): # not found - db_nsr_update["_admin.deployed.RO.nsr_id"] = None - db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED" - db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None - self.logger.debug( - logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id) - ) - elif ( - isinstance(e, ROclient.ROClientException) and e.http_code == 409 - ): # conflict - failed_detail.append("delete conflict: {}".format(e)) - self.logger.debug( - logging_text - + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e) - ) - else: - failed_detail.append("delete error: {}".format(e)) - self.logger.error( - logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e) - ) - - # Delete nsd - if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")): - ro_nsd_id = nsr_deployed["RO"]["nsd_id"] - try: - stage[2] = "Deleting nsd from RO." 
- db_nsr_update["detailed-status"] = " ".join(stage) - self.update_db_2("nsrs", nsr_id, db_nsr_update) - self._write_op_status(nslcmop_id, stage) - await self.RO.delete("nsd", ro_nsd_id) - self.logger.debug( - logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id) - ) - db_nsr_update["_admin.deployed.RO.nsd_id"] = None - except Exception as e: - if ( - isinstance(e, ROclient.ROClientException) and e.http_code == 404 - ): # not found - db_nsr_update["_admin.deployed.RO.nsd_id"] = None - self.logger.debug( - logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id) - ) - elif ( - isinstance(e, ROclient.ROClientException) and e.http_code == 409 - ): # conflict - failed_detail.append( - "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e) - ) - self.logger.debug(logging_text + failed_detail[-1]) - else: - failed_detail.append( - "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e) - ) - self.logger.error(logging_text + failed_detail[-1]) - - if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")): - for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]): - if not vnf_deployed or not vnf_deployed["id"]: - continue - try: - ro_vnfd_id = vnf_deployed["id"] - stage[ - 2 - ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format( - vnf_deployed["member-vnf-index"], ro_vnfd_id - ) - db_nsr_update["detailed-status"] = " ".join(stage) - self.update_db_2("nsrs", nsr_id, db_nsr_update) - self._write_op_status(nslcmop_id, stage) - await self.RO.delete("vnfd", ro_vnfd_id) - self.logger.debug( - logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id) - ) - db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None - except Exception as e: - if ( - isinstance(e, ROclient.ROClientException) and e.http_code == 404 - ): # not found - db_nsr_update[ - "_admin.deployed.RO.vnfd.{}.id".format(index) - ] = None - self.logger.debug( - logging_text - + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id) - ) - elif ( - isinstance(e, ROclient.ROClientException) and e.http_code == 409 - ): # conflict - failed_detail.append( - "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e) - ) - self.logger.debug(logging_text + failed_detail[-1]) - else: - failed_detail.append( - "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e) - ) - self.logger.error(logging_text + failed_detail[-1]) - - if failed_detail: - stage[2] = "Error deleting from VIM" - else: - stage[2] = "Deleted from VIM" - db_nsr_update["detailed-status"] = " ".join(stage) - self.update_db_2("nsrs", nsr_id, db_nsr_update) - self._write_op_status(nslcmop_id, stage) - - if failed_detail: - raise LcmException("; ".join(failed_detail)) - async def terminate(self, nsr_id, nslcmop_id): # Try to lock HA task here task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id) @@ -4665,13 +4398,7 @@ class NsLcm(LcmBase): logging_text, nsr_deployed, nsr_id, nslcmop_id, stage ) ) - else: - task_delete_ro = asyncio.ensure_future( - self._terminate_RO( - logging_text, nsr_deployed, nsr_id, nslcmop_id, stage - ) - ) - tasks_dict_info[task_delete_ro] = "Removing deployment from VIM" + tasks_dict_info[task_delete_ro] = "Removing deployment from VIM" # rest of staff will be done at finally @@ -5108,6 +4835,7 @@ class NsLcm(LcmBase): nslcmop_operation_state = None error_description_nslcmop = None exc = None + step = "" try: # wait for any previous tasks in process step = "Waiting for previous operations to terminate" @@ -5282,9 +5010,10 @@ class NsLcm(LcmBase): del desc_params["kdu_model"] else: kdu_model = 
kdu.get("kdu-model") - parts = kdu_model.split(sep=":") - if len(parts) == 2: - kdu_model = parts[0] + if kdu_model.count("/") < 2: # helm chart is not embedded + parts = kdu_model.split(sep=":") + if len(parts) == 2: + kdu_model = parts[0] if desc_params.get("kdu_atomic_upgrade"): atomic_upgrade = desc_params.get( "kdu_atomic_upgrade" @@ -5745,7 +5474,6 @@ class NsLcm(LcmBase): raise except Exception as e: - self.logger.debug("Error upgrading charm {}".format(path)) return "FAILED", "Error upgrading charm {}: {}".format(path, e) @@ -5782,6 +5510,7 @@ class NsLcm(LcmBase): exc = None change_type = "updated" detailed_status = "" + member_vnf_index = None try: # wait for any previous tasks in process @@ -5808,7 +5537,6 @@ class NsLcm(LcmBase): nsr_deployed = db_nsr["_admin"].get("deployed") if update_type == "CHANGE_VNFPKG": - # Get the input parameters given through update request vnf_instance_id = db_nslcmop["operationParams"][ "changeVnfPackageData" @@ -5848,7 +5576,6 @@ class NsLcm(LcmBase): step = "Checking if revision has changed in VNFD" if current_vnf_revision != latest_vnfd_revision: - change_type = "policy_updated" # There is new revision of VNFD, update operation is required @@ -5885,7 +5612,6 @@ class NsLcm(LcmBase): step = "Getting descriptor config" if current_vnfd.get("kdu"): - search_key = "kdu_name" else: search_key = "vnfd_id" @@ -5906,7 +5632,6 @@ class NsLcm(LcmBase): # There could be several charm used in the same VNF for ee_item in ee_list: if ee_item.get("juju"): - step = "Getting charm name" charm_name = ee_item["juju"].get("charm") @@ -5968,7 +5693,6 @@ class NsLcm(LcmBase): if find_software_version(current_vnfd) != find_software_version( latest_vnfd ): - step = "Checking if existing VNF has charm" for current_charm_path, target_charm_path in list( charm_artifact_paths @@ -6010,10 +5734,8 @@ class NsLcm(LcmBase): current_charm_path, target_charm_path ) ): - step = "Checking whether VNF uses juju bundle" if check_juju_bundle_existence(current_vnfd): - raise LcmException( "Charm upgrade is not supported for the instance which" " uses juju-bundle: {}".format( @@ -6286,7 +6008,10 @@ class NsLcm(LcmBase): "nslcmop_id": nslcmop_id, "operationState": nslcmop_operation_state, } - if change_type in ("vnf_terminated", "policy_updated"): + if ( + change_type in ("vnf_terminated", "policy_updated") + and member_vnf_index + ): msg.update({"vnf_member_index": member_vnf_index}) await self.msg.aiowrite("ns", change_type, msg, loop=self.loop) except Exception as e: @@ -7024,6 +6749,7 @@ class NsLcm(LcmBase): vdu_id = None vdu_name = None kdu_name = None + kdu_index = None self._deploy_n2vc( logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index), @@ -7035,6 +6761,7 @@ class NsLcm(LcmBase): vnfd_id=vnfd_id, vdu_id=vdu_id, kdu_name=kdu_name, + kdu_index=kdu_index, member_vnf_index=member_vnf_index, vdu_index=vdu_index, vdu_name=vdu_name, @@ -7061,6 +6788,7 @@ class NsLcm(LcmBase): if descriptor_config: vdu_name = None kdu_name = None + kdu_index = None stage[ 1 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format( @@ -7083,6 +6811,7 @@ class NsLcm(LcmBase): kdu_name=kdu_name, member_vnf_index=member_vnf_index, vdu_index=vdu_index, + kdu_index=kdu_index, vdu_name=vdu_name, deploy_params=deploy_params_vdu, descriptor_config=descriptor_config, @@ -7483,8 +7212,48 @@ class NsLcm(LcmBase): ) async def extract_prometheus_scrape_jobs( - self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip - ): + self, + ee_id: str, + artifact_path: 
str, + ee_config_descriptor: dict, + vnfr_id: str, + nsr_id: str, + target_ip: str, + element_type: str, + vnf_member_index: str = "", + vdu_id: str = "", + vdu_index: int = None, + kdu_name: str = "", + kdu_index: int = None, + ) -> dict: + """Method to extract prometheus scrape jobs from EE's Prometheus template job file + This method will wait until the corresponding VDU or KDU is fully instantiated + + Args: + ee_id (str): Execution Environment ID + artifact_path (str): Path where the EE's content is (including the Prometheus template file) + ee_config_descriptor (dict): Execution Environment's configuration descriptor + vnfr_id (str): VNFR ID where this EE applies + nsr_id (str): NSR ID where this EE applies + target_ip (str): VDU/KDU instance IP address + element_type (str): NS or VNF or VDU or KDU + vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "". + vdu_id (str, optional): VDU ID where this EE applies. Defaults to "". + vdu_index (int, optional): VDU index where this EE applies. Defaults to None. + kdu_name (str, optional): KDU name where this EE applies. Defaults to "". + kdu_index (int, optional): KDU index where this EE applies. Defaults to None. + + Raises: + LcmException: When the VDU or KDU instance was not found in an hour + + Returns: + _type_: Prometheus jobs + """ + # default the vdur and kdur names to an empty string, to avoid any later + # problem with Prometheus when the element type is not VDU or KDU + vdur_name = "" + kdur_name = "" + # look if exist a file called 'prometheus*.j2' and artifact_content = self.fs.dir_ls(artifact_path) job_file = next( @@ -7500,6 +7269,52 @@ class NsLcm(LcmBase): with self.fs.file_open((artifact_path, job_file), "r") as f: job_data = f.read() + # obtain the VDUR or KDUR, if the element type is VDU or KDU + if element_type in ("VDU", "KDU"): + for _ in range(360): + db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id}) + if vdu_id and vdu_index is not None: + vdur = next( + ( + x + for x in get_iterable(db_vnfr, "vdur") + if ( + x.get("vdu-id-ref") == vdu_id + and x.get("count-index") == vdu_index + ) + ), + {}, + ) + if vdur.get("name"): + vdur_name = vdur.get("name") + break + if kdu_name and kdu_index is not None: + kdur = next( + ( + x + for x in get_iterable(db_vnfr, "kdur") + if ( + x.get("kdu-name") == kdu_name + and x.get("count-index") == kdu_index + ) + ), + {}, + ) + if kdur.get("name"): + kdur_name = kdur.get("name") + break + + await asyncio.sleep(10, loop=self.loop) + else: + if vdu_id and vdu_index is not None: + raise LcmException( + f"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated" + ) + if kdu_name and kdu_index is not None: + raise LcmException( + f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated" + ) + # TODO get_service _, _, service = ee_id.partition(".") # remove prefix "namespace." host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"]) @@ -7510,6 +7325,11 @@ class NsLcm(LcmBase): "TARGET_IP": target_ip, "EXPORTER_POD_IP": host_name, "EXPORTER_POD_PORT": host_port, + "NSR_ID": nsr_id, + "VNF_MEMBER_INDEX": vnf_member_index, + "VDUR_NAME": vdur_name, + "KDUR_NAME": kdur_name, + "ELEMENT_TYPE": element_type, } job_list = parse_job(job_data, variables) # ensure job_name is using the vnfr_id. Adding the metadata nsr_id @@ -8302,7 +8122,6 @@ class NsLcm(LcmBase): } step = "" try: - element_type = "NS" element_under_configuration = nsr_id
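
Note on the IP-profile refactor in this diff: both the NSD and VNFD code paths previously built an RO "ip_profile" dictionary inline from the descriptor's "l3-protocol-data"; the change removes that duplicated mapping and instead stores the raw "l3-protocol-data" in vld_params["ip-profile"], converting it once via the new vld_to_ro_ip_profile() helper imported from osm_lcm.lcm_utils when the target VLD's vim_info is filled in. The helper itself is not shown in this diff. The sketch below is a hypothetical, minimal reconstruction based only on the inline mapping that the diff deletes; the real helper may cover additional fields (for example DNS servers or IP address ranges), and the function and parameter names here are illustrative, not the actual lcm_utils API.

def vld_to_ro_ip_profile_sketch(source_data: dict) -> dict:
    """Translate an IM l3-protocol-data / ip-profile block into the RO ip_profile format.

    Minimal sketch: reproduces only the four field renamings visible in the
    code removed by this commit (ip-version, cidr -> subnet-address,
    gateway-ip -> gateway-address, dhcp-enabled -> dhcp-params.enabled).
    """
    if not source_data:
        return {}
    dest = {}
    if "ip-version" in source_data:
        dest["ip-version"] = source_data["ip-version"]
    if "cidr" in source_data:
        dest["subnet-address"] = source_data["cidr"]
    if "gateway-ip" in source_data:
        dest["gateway-address"] = source_data["gateway-ip"]
    if "dhcp-enabled" in source_data:
        dest["dhcp-params"] = {"enabled": source_data["dhcp-enabled"]}
    return dest

Centralising the conversion this way keeps the NSD virtual-link-profile and VNFD internal-virtual-link-profile branches symmetric: each now only selects the relevant "l3-protocol-data" block, and the translation to the RO schema happens in a single place, which is also what allows instantiation-time "ip-profile" overrides to be passed through the same helper.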