X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_lcm%2Fns.py;h=5bbbb21e1e2f4fdeb840580504516917f850e3ef;hb=refs%2Fchanges%2F18%2F11818%2F14;hp=9fd440e86c7272f59f381864d82979f881df5359;hpb=73bac504fefbcddeed4551d97adc0ea13eebd705;p=osm%2FLCM.git diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index 9fd440e..5bbbb21 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -348,43 +348,49 @@ class NsLcm(LcmBase): self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e)) async def _on_update_k8s_db( - self, cluster_uuid, kdu_instance, filter=None, vca_id=None + self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju" ): """ Updating vca status in NSR record :param cluster_uuid: UUID of a k8s cluster :param kdu_instance: The unique name of the KDU instance :param filter: To get nsr_id + :cluster_type: The cluster type (juju, k8s) :return: none """ # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}" # .format(cluster_uuid, kdu_instance, filter)) + nsr_id = filter.get("_id") try: - nsr_id = filter.get("_id") - - # get vca status for NS - vca_status = await self.k8sclusterjuju.status_kdu( - cluster_uuid, - kdu_instance, - complete_status=True, + vca_status = await self.k8scluster_map[cluster_type].status_kdu( + cluster_uuid=cluster_uuid, + kdu_instance=kdu_instance, yaml_format=False, + complete_status=True, vca_id=vca_id, ) + # vcaStatus db_dict = dict() db_dict["vcaStatus"] = {nsr_id: vca_status} - await self.k8sclusterjuju.update_vca_status( - db_dict["vcaStatus"], - kdu_instance, - vca_id=vca_id, + if cluster_type in ("juju-bundle", "juju"): + # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA + # status in a similar way between Juju Bundles and Helm Charts on this side + await self.k8sclusterjuju.update_vca_status( + db_dict["vcaStatus"], + kdu_instance, + vca_id=vca_id, + ) + + self.logger.debug( + f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}" ) # write to database self.update_db_2("nsrs", nsr_id, db_dict) - except (asyncio.CancelledError, asyncio.TimeoutError): raise except Exception as e: @@ -440,7 +446,8 @@ class NsLcm(LcmBase): def _get_vdu_additional_params(self, db_vnfr, vdu_id): vdur = next( - vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"] + (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), + {} ) additional_params = vdur.get("additionalParams") return parse_yaml_strings(additional_params) @@ -517,6 +524,7 @@ class NsLcm(LcmBase): def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False): db_vdu_push_list = [] + template_vdur = [] db_update = {"_admin.modified": time()} if vdu_create: for vdu_id, vdu_count in vdu_create.items(): @@ -529,17 +537,27 @@ class NsLcm(LcmBase): None, ) if not vdur: - raise LcmException( - "Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format( + # Read the template saved in the db: + self.logger.debug(f"No vdur in the database. Using the vdur-template to scale") + vdur_template = db_vnfr.get("vdur-template") + if not vdur_template: + raise LcmException( + "Error scaling OUT VNFR for {}. 
No vnfr or template exists".format( vdu_id + ) ) - ) - + vdur = vdur_template[0] + #Delete a template from the database after using it + self.db.set_one("vnfrs", + {"_id": db_vnfr["_id"]}, + None, + pull={"vdur-template": {"_id": vdur['_id']}} + ) for count in range(vdu_count): vdur_copy = deepcopy(vdur) vdur_copy["status"] = "BUILD" vdur_copy["status-detailed"] = None - vdur_copy["ip-address"]: None + vdur_copy["ip-address"] = None vdur_copy["_id"] = str(uuid4()) vdur_copy["count-index"] += count + 1 vdur_copy["id"] = "{}-{}".format( @@ -559,12 +577,17 @@ class NsLcm(LcmBase): ) else: iface.pop("mac-address", None) - iface.pop( - "mgmt_vnf", None - ) # only first vdu can be managment of vnf + if db_vnfr["vdur"]: + iface.pop( + "mgmt_vnf", None + ) # only first vdu can be managment of vnf db_vdu_push_list.append(vdur_copy) # self.logger.debug("scale out, adding vdu={}".format(vdur_copy)) if vdu_delete: + if len(db_vnfr["vdur"]) == 1: + # The scale will move to 0 instances + self.logger.debug(f"Scaling to 0 !, creating the template with the last vdur") + template_vdur = [db_vnfr["vdur"][0]] for vdu_id, vdu_count in vdu_delete.items(): if mark_delete: indexes_to_delete = [ @@ -592,7 +615,14 @@ class NsLcm(LcmBase): None, pull={"vdur": {"_id": vdu["_id"]}}, ) - db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None + db_push = {} + if db_vdu_push_list: + db_push["vdur"] = db_vdu_push_list + if template_vdur: + db_push["vdur-template"] = template_vdur + if not db_push: + db_push = None + db_vnfr["vdur-template"] = template_vdur self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push) # modify passed dictionary db_vnfr db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]}) @@ -822,6 +852,37 @@ class NsLcm(LcmBase): if vld_params.get("common_id"): target_vld["common_id"] = vld_params.get("common_id") + # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account + def update_ns_vld_target(target, ns_params): + for vnf_params in ns_params.get("vnf", ()): + if vnf_params.get("vimAccountId"): + target_vnf = next( + ( + vnfr + for vnfr in db_vnfrs.values() + if vnf_params["member-vnf-index"] + == vnfr["member-vnf-index-ref"] + ), + None, + ) + vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None) + for a_index, a_vld in enumerate(target["ns"]["vld"]): + target_vld = find_in_list( + get_iterable(vdur, "interfaces"), + lambda iface: iface.get("ns-vld-id") == a_vld["name"], + ) + if target_vld: + if vnf_params.get("vimAccountId") not in a_vld.get( + "vim_info", {} + ): + target["ns"]["vld"][a_index].get("vim_info").update( + { + "vim:{}".format(vnf_params["vimAccountId"]): { + "vim_network_name": "" + } + } + ) + nslcmop_id = db_nslcmop["_id"] target = { "name": db_nsr["name"], @@ -836,6 +897,14 @@ class NsLcm(LcmBase): image["vim_info"] = {} for flavor in target["flavor"]: flavor["vim_info"] = {} + if db_nsr.get("affinity-or-anti-affinity-group"): + target["affinity-or-anti-affinity-group"] = deepcopy( + db_nsr["affinity-or-anti-affinity-group"] + ) + for affinity_or_anti_affinity_group in target[ + "affinity-or-anti-affinity-group" + ]: + affinity_or_anti_affinity_group["vim_info"] = {} if db_nslcmop.get("lcmOperationType") != "instantiate": # get parameters of instantiation: @@ -937,6 +1006,8 @@ class NsLcm(LcmBase): vld_params.update(vld_instantiation_params) parse_vld_instantiation_params(target_vim, target_vld, vld_params, None) target["ns"]["vld"].append(target_vld) + # Update the target ns_vld if vnf vim_account is 
overriden by instantiation params + update_ns_vld_target(target, ns_params) for vnfr in db_vnfrs.values(): vnfd = find_in_list( @@ -1126,6 +1197,13 @@ class NsLcm(LcmBase): if target_vim not in ns_image["vim_info"]: ns_image["vim_info"][target_vim] = {} + # Affinity groups + if vdur.get("affinity-or-anti-affinity-group-id"): + for ags_id in vdur["affinity-or-anti-affinity-group-id"]: + ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)] + if target_vim not in ns_ags["vim_info"]: + ns_ags["vim_info"][target_vim] = {} + vdur["vim_info"] = {target_vim: {}} # instantiation parameters # if vnf_params: @@ -1665,7 +1743,8 @@ class NsLcm(LcmBase): base_folder["folder"], base_folder["pkg-dir"], "charms" - if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") + if vca_type + in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") else "helm-charts", vca_name, ) @@ -1673,7 +1752,8 @@ class NsLcm(LcmBase): artifact_path = "{}/Scripts/{}/{}/".format( base_folder["folder"], "charms" - if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") + if vca_type + in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") else "helm-charts", vca_name, ) @@ -2004,12 +2084,10 @@ class NsLcm(LcmBase): for job in prometheus_jobs: self.db.set_one( "prometheus_jobs", - { - "job_name": job["job_name"] - }, + {"job_name": job["job_name"]}, job, upsert=True, - fail_on_empty=False + fail_on_empty=False, ) step = "instantiated at VCA" @@ -2276,6 +2354,10 @@ class NsLcm(LcmBase): # read from db: operation stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id) db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) + if db_nslcmop["operationParams"].get("additionalParamsForVnf"): + db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads( + db_nslcmop["operationParams"]["additionalParamsForVnf"] + ) ns_params = db_nslcmop.get("operationParams") if ns_params and ns_params.get("timeout_ns_deploy"): timeout_ns_deploy = ns_params["timeout_ns_deploy"] @@ -2286,8 +2368,10 @@ class NsLcm(LcmBase): # read from db: ns stage[1] = "Getting nsr={} from db.".format(nsr_id) + self.logger.debug(logging_text + stage[1]) db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"]) + self.logger.debug(logging_text + stage[1]) nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]}) self.fs.sync(db_nsr["nsd-id"]) db_nsr["nsd"] = nsd @@ -2303,6 +2387,16 @@ class NsLcm(LcmBase): # for each vnf in ns, read vnfd for vnfr in db_vnfrs_list: + if vnfr.get("kdur"): + kdur_list = [] + for kdur in vnfr["kdur"]: + if kdur.get("additionalParams"): + kdur["additionalParams"] = json.loads( + kdur["additionalParams"] + ) + kdur_list.append(kdur) + vnfr["kdur"] = kdur_list + db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr vnfd_id = vnfr["vnfd-id"] vnfd_ref = vnfr["vnfd-ref"] @@ -2497,8 +2591,8 @@ class NsLcm(LcmBase): ) deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)} if kdur.get("additionalParams"): - deploy_params_kdu = parse_yaml_strings( - kdur["additionalParams"] + deploy_params_kdu.update( + parse_yaml_strings(kdur["additionalParams"].copy()) ) self._deploy_n2vc( @@ -2770,7 +2864,9 @@ class NsLcm(LcmBase): if requirer_id != nsd["id"]: requirer_dict["vnf-profile-id"] = requirer_id else: - raise Exception("provider/requirer or entities must be included in the relation.") + raise Exception( + "provider/requirer or entities must be included in the relation." 
+ ) relation_provider = self._update_ee_relation_data_with_implicit_data( nsr_id, nsd, provider_dict, cached_vnfds ) @@ -2822,7 +2918,9 @@ class NsLcm(LcmBase): if requirer_id != vnfd_id: requirer_dict["vdu-profile-id"] = requirer_id else: - raise Exception("provider/requirer or entities must be included in the relation.") + raise Exception( + "provider/requirer or entities must be included in the relation." + ) relation_provider = self._update_ee_relation_data_with_implicit_data( nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id ) @@ -4780,10 +4878,18 @@ class NsLcm(LcmBase): db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) vca_id = self.get_vca_id({}, db_nsr) if db_nsr["_admin"]["deployed"]["K8s"]: - for k8s_index, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]): - cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"] + for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]): + cluster_uuid, kdu_instance, cluster_type = ( + k8s["k8scluster-uuid"], + k8s["kdu-instance"], + k8s["k8scluster-type"], + ) await self._on_update_k8s_db( - cluster_uuid, kdu_instance, filter={"_id": nsr_id}, vca_id=vca_id + cluster_uuid=cluster_uuid, + kdu_instance=kdu_instance, + filter={"_id": nsr_id}, + vca_id=vca_id, + cluster_type=cluster_type, ) else: for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]): @@ -4825,6 +4931,10 @@ class NsLcm(LcmBase): step = "Getting information from database" db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id}) db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) + if db_nslcmop["operationParams"].get("primitive_params"): + db_nslcmop["operationParams"]["primitive_params"] = json.loads( + db_nslcmop["operationParams"]["primitive_params"] + ) nsr_deployed = db_nsr["_admin"].get("deployed") vnf_index = db_nslcmop["operationParams"].get("member_vnf_index") @@ -4842,6 +4952,15 @@ class NsLcm(LcmBase): db_vnfr = self.db.get_one( "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id} ) + if db_vnfr.get("kdur"): + kdur_list = [] + for kdur in db_vnfr["kdur"]: + if kdur.get("additionalParams"): + kdur["additionalParams"] = json.loads( + kdur["additionalParams"] + ) + kdur_list.append(kdur) + db_vnfr["kdur"] = kdur_list step = "Getting vnfd from database" db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]}) @@ -5331,7 +5450,7 @@ class NsLcm(LcmBase): for kdu_delta in delta.get("kdu-resource-delta", {}): kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"]) kdu_name = kdu_profile["kdu-name"] - resource_name = kdu_profile["resource-name"] + resource_name = kdu_profile.get("resource-name", "") # Might have different kdus in the same delta # Should have list for each kdu @@ -5347,7 +5466,6 @@ class NsLcm(LcmBase): and kdur.get("helm-version") == "v2" ): k8s_cluster_type = "helm-chart" - raise NotImplementedError elif kdur.get("juju-bundle"): k8s_cluster_type = "juju-bundle" else: @@ -5377,7 +5495,13 @@ class NsLcm(LcmBase): kdu_instance = deployed_kdu.get("kdu-instance") instance_num = await self.k8scluster_map[ k8s_cluster_type - ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id) + ].get_scale_count( + resource_name, + kdu_instance, + vca_id=vca_id, + cluster_uuid=deployed_kdu.get("k8scluster-uuid"), + kdu_model=deployed_kdu.get("kdu-model"), + ) kdu_replica_count = instance_num + kdu_delta.get( "number-of-instances", 1 ) @@ -5461,7 +5585,7 @@ class NsLcm(LcmBase): for kdu_delta in delta.get("kdu-resource-delta", {}): kdu_profile = get_kdu_resource_profile(db_vnfd, 
kdu_delta["id"]) kdu_name = kdu_profile["kdu-name"] - resource_name = kdu_profile["resource-name"] + resource_name = kdu_profile.get("resource-name", "") if not scaling_info["kdu-delete"].get(kdu_name, None): scaling_info["kdu-delete"][kdu_name] = [] @@ -5475,7 +5599,6 @@ class NsLcm(LcmBase): and kdur.get("helm-version") == "v2" ): k8s_cluster_type = "helm-chart" - raise NotImplementedError elif kdur.get("juju-bundle"): k8s_cluster_type = "juju-bundle" else: @@ -5503,7 +5626,13 @@ class NsLcm(LcmBase): kdu_instance = deployed_kdu.get("kdu-instance") instance_num = await self.k8scluster_map[ k8s_cluster_type - ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id) + ].get_scale_count( + resource_name, + kdu_instance, + vca_id=vca_id, + cluster_uuid=deployed_kdu.get("k8scluster-uuid"), + kdu_model=deployed_kdu.get("kdu-model"), + ) kdu_replica_count = instance_num - kdu_delta.get( "number-of-instances", 1 ) @@ -5702,7 +5831,7 @@ class NsLcm(LcmBase): ] = "Deleting the execution environments" scale_process = "VCA" for vca_info in vca_scaling_info: - if vca_info["type"] == "delete": + if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"): member_vnf_index = str(vca_info["member-vnf-index"]) self.logger.debug( logging_text + "vdu info: {}".format(vca_info) @@ -5715,14 +5844,6 @@ class NsLcm(LcmBase): ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format( member_vnf_index, vdu_id, vdu_index ) - else: - vdu_index = 0 - kdu_id = vca_info["osm_kdu_id"] - stage[ - 1 - ] = "Scaling member_vnf_index={}, kdu_id={}, vdu_index={} ".format( - member_vnf_index, kdu_id, vdu_index - ) stage[2] = step = "Scaling in VCA" self._write_op_status(op_id=nslcmop_id, stage=stage) vca_update = db_nsr["_admin"]["deployed"]["VCA"] @@ -5838,7 +5959,7 @@ class NsLcm(LcmBase): ] = "Creating new execution environments" scale_process = "VCA" for vca_info in vca_scaling_info: - if vca_info["type"] == "create": + if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"): member_vnf_index = str(vca_info["member-vnf-index"]) self.logger.debug( logging_text + "vdu info: {}".format(vca_info) @@ -5926,43 +6047,6 @@ class NsLcm(LcmBase): task_instantiation_info=tasks_dict_info, stage=stage, ) - else: - kdu_name = vca_info["osm_kdu_id"] - descriptor_config = get_configuration(db_vnfd, kdu_name) - if descriptor_config: - vdu_id = None - kdu_index = int(vca_info["kdu_index"]) - vdu_name = None - kdur = next( - x - for x in db_vnfr["kdur"] - if x["kdu-name"] == kdu_name - ) - deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)} - if kdur.get("additionalParams"): - deploy_params_kdu = parse_yaml_strings( - kdur["additionalParams"] - ) - - self._deploy_n2vc( - logging_text=logging_text, - db_nsr=db_nsr, - db_vnfr=db_vnfr, - nslcmop_id=nslcmop_id, - nsr_id=nsr_id, - nsi_id=nsi_id, - vnfd_id=vnfd_id, - vdu_id=vdu_id, - kdu_name=kdu_name, - member_vnf_index=member_vnf_index, - vdu_index=kdu_index, - vdu_name=vdu_name, - deploy_params=deploy_params_kdu, - descriptor_config=descriptor_config, - base_folder=base_folder, - task_instantiation_info=tasks_dict_info, - stage=stage, - ) # SCALE-UP VCA - END scale_process = None @@ -6209,6 +6293,7 @@ class NsLcm(LcmBase): ) cluster_uuid = deployed_kdu["k8scluster-uuid"] kdu_instance = deployed_kdu["kdu-instance"] + kdu_model = deployed_kdu.get("kdu-model") scale = int(kdu_scaling_info["scale"]) k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"] @@ -6263,6 +6348,10 @@ class NsLcm(LcmBase): scale, kdu_scaling_info["resource-name"], vca_id=vca_id, + 
cluster_uuid=cluster_uuid, + kdu_model=kdu_model, + atomic=True, + db_dict=db_dict, ), timeout=self.timeout_vca_on_error, ) @@ -6347,13 +6436,7 @@ class NsLcm(LcmBase): ) async def extract_prometheus_scrape_jobs( - self, - ee_id, - artifact_path, - ee_config_descriptor, - vnfr_id, - nsr_id, - target_ip + self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip ): # look if exist a file called 'prometheus*.j2' and artifact_content = self.fs.dir_ls(artifact_path)
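
Note (illustrative, not part of the patch): the central behavioural change in _on_update_k8s_db is that the VCA status query is now dispatched through self.k8scluster_map[cluster_type] instead of always going to self.k8sclusterjuju, so Helm-based KDUs can be refreshed the same way as Juju bundles. The sketch below shows that dispatch pattern in isolation; the connector classes, map keys and return values are assumptions made only for this example and do not reproduce the real N2VC connector API.

# Minimal sketch of the cluster-type dispatch used by the patched
# _on_update_k8s_db. The "Fake*" connectors are stand-ins; only the call
# shape (keyword arguments of status_kdu) is taken from the diff above.

import asyncio


class FakeJujuConnector:
    async def status_kdu(self, cluster_uuid, kdu_instance, yaml_format,
                         complete_status, vca_id=None):
        # A real connector would query the Juju controller here.
        return {"status": "active", "cluster": cluster_uuid, "kdu": kdu_instance}


class FakeHelmConnector:
    async def status_kdu(self, cluster_uuid, kdu_instance, yaml_format,
                         complete_status, vca_id=None):
        # A real connector would run a Helm status query here.
        return {"status": "deployed", "cluster": cluster_uuid, "kdu": kdu_instance}


# Mirrors the self.k8scluster_map[cluster_type] lookup; the exact keys used by
# LCM ("juju-bundle", "helm-chart", ...) are assumed from the diff context.
k8scluster_map = {
    "juju-bundle": FakeJujuConnector(),
    "helm-chart": FakeHelmConnector(),
}


async def get_vca_status(cluster_type, cluster_uuid, kdu_instance, vca_id=None):
    # Same call shape as the patched _on_update_k8s_db.
    return await k8scluster_map[cluster_type].status_kdu(
        cluster_uuid=cluster_uuid,
        kdu_instance=kdu_instance,
        yaml_format=False,
        complete_status=True,
        vca_id=vca_id,
    )


if __name__ == "__main__":
    print(asyncio.run(get_vca_status("helm-chart", "cluster-1", "kdu-instance-1")))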