X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_lcm%2Fns.py;h=75db8e5c006f6b70f642487aca239a613408cbc0;hb=f0f83a3ca9d8a94e464721ca0d99c5ff5d47f405;hp=ea2320f389bae1993183b0f436b9fcc5ab925086;hpb=922c41753ffbb4b526f2135a23c39f480c58e2cb;p=osm%2FLCM.git diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index ea2320f..75db8e5 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -28,12 +28,12 @@ from osm_lcm import ROclient from osm_lcm.ng_ro import NgRoClient, NgRoException from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict from osm_lcm.data_utils.nsd import get_vnf_profiles -from osm_lcm.data_utils.vnfd import get_vnf_configuration, get_vdu_list, get_vdu_profile, \ +from osm_lcm.data_utils.vnfd import get_vdu_list, get_vdu_profile, \ get_ee_sorted_initial_config_primitive_list, get_ee_sorted_terminate_config_primitive_list, \ - get_kdu_list, get_virtual_link_profiles, get_vdu, get_vdu_configuration, get_kdu_configuration, \ - get_vdu_index + get_kdu_list, get_virtual_link_profiles, get_vdu, get_configuration, \ + get_vdu_index, get_scaling_aspect, get_number_of_instances, get_juju_ee_ref from osm_lcm.data_utils.list_utils import find_in_list -from osm_lcm.data_utils.vnfr import get_osm_params +from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index from osm_lcm.data_utils.dict_utils import parse_yaml_strings from osm_lcm.data_utils.database.vim_account import VimAccountDB from n2vc.k8s_helm_conn import K8sHelmConnector @@ -338,7 +338,6 @@ class NsLcm(LcmBase): # remove unused by RO configuration, monitoring, scaling and internal keys vnfd_RO.pop("_id", None) vnfd_RO.pop("_admin", None) - vnfd_RO.pop("vnf-configuration", None) vnfd_RO.pop("monitoring-param", None) vnfd_RO.pop("scaling-group-descriptor", None) vnfd_RO.pop("kdu", None) @@ -646,10 +645,13 @@ class NsLcm(LcmBase): } # check if this network needs SDN assist if vld.get("pci-interfaces"): - db_vim = VimAccountDB.get_vim_account_with_id(target_vld["vim_info"][0]["vim_account_id"]) + db_vim = get_vim_account(ns_params["vimAccountId"]) sdnc_id = db_vim["config"].get("sdn-controller") if sdnc_id: - target_vld["vim_info"].append({"sdnc_id": sdnc_id}) + sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"]) + target_sdn = "sdn:{}".format(sdnc_id) + target_vld["vim_info"][target_sdn] = { + "sdn": True, "target_vim": target_vim, "vlds": [sdn_vld], "type": vld.get("type")} nsd_vnf_profiles = get_vnf_profiles(nsd) for nsd_vnf_profile in nsd_vnf_profiles: @@ -742,8 +744,8 @@ class NsLcm(LcmBase): self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all)) if ssh_keys_all: - vdu_configuration = get_vdu_configuration(vnfd, vdur["vdu-id-ref"]) - vnf_configuration = get_vnf_configuration(vnfd) + vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"]) + vnf_configuration = get_configuration(vnfd, vnfd["id"]) if vdu_configuration and vdu_configuration.get("config-access") and \ vdu_configuration.get("config-access").get("ssh-access"): vdur["ssh-keys"] = ssh_keys_all @@ -783,8 +785,23 @@ class NsLcm(LcmBase): ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])] if target_vim not in ns_flavor["vim_info"]: ns_flavor["vim_info"][target_vim] = {} - # image - ns_image = target["image"][int(vdur["ns-image-id"])] + + # deal with images + # in case alternative images are provided we must check if they should be applied + # for the vim_type, modify the vim_type taking into account + ns_image_id = int(vdur["ns-image-id"]) + if vdur.get("alt-image-ids"): + db_vim = 
get_vim_account(vnfr["vim-account-id"]) + vim_type = db_vim["vim_type"] + for alt_image_id in vdur.get("alt-image-ids"): + ns_alt_image = target["image"][int(alt_image_id)] + if vim_type == ns_alt_image.get("vim-type"): + # must use alternative image + self.logger.debug("use alternative image id: {}".format(alt_image_id)) + ns_image_id = alt_image_id + vdur["ns-image-id"] = ns_image_id + break + ns_image = target["image"][int(ns_image_id)] if target_vim not in ns_image["vim_info"]: ns_image["vim_info"][target_vim] = {} @@ -1146,14 +1163,14 @@ class NsLcm(LcmBase): if vnfr_id: element_type = 'VNF' element_under_configuration = vnfr_id - namespace += ".{}".format(vnfr_id) + namespace += ".{}-{}".format(vnfr_id, vdu_index or 0) if vdu_id: namespace += ".{}-{}".format(vdu_id, vdu_index or 0) element_type = 'VDU' element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0) osm_config["osm"]["vdu_id"] = vdu_id elif kdu_name: - namespace += ".{}".format(kdu_name) + namespace += ".{}.{}".format(kdu_name, vdu_index or 0) element_type = 'KDU' element_under_configuration = kdu_name osm_config["osm"]["kdu_name"] = kdu_name @@ -1202,7 +1219,7 @@ class NsLcm(LcmBase): ) step = "create execution environment" - self.logger.debug(logging_text + step) + self.logger.debug(logging_text + step) ee_id = None credentials = None @@ -1215,15 +1232,16 @@ class NsLcm(LcmBase): cloud_name=vca_k8s_cloud, credential_name=vca_k8s_cloud_credential, ) - elif vca_type == "helm" or vca_type == "helm-v3": + elif vca_type == "helm" or vca_type == "helm-v3": ee_id, credentials = await self.vca_map[vca_type].create_execution_environment( namespace=namespace, reuse_ee_id=ee_id, db_dict=db_dict, - cloud_name=vca_cloud, - credential_name=vca_cloud_credential, + config=osm_config, + artifact_path=artifact_path, + vca_type=vca_type ) - else: + else: ee_id, credentials = await self.vca_map[vca_type].create_execution_environment( namespace=namespace, reuse_ee_id=ee_id, @@ -1686,7 +1704,7 @@ class NsLcm(LcmBase): vnfd = self.db.get_one("vnfds", {"_id": vnfd_id}) # store vnfd - db_vnfds.append(vnfd) # vnfd's indexed by id + db_vnfds.append(vnfd) # Get or generates the _admin.deployed.VCA list vca_deployed_list = None @@ -1779,7 +1797,7 @@ class NsLcm(LcmBase): if db_vnfr.get("additionalParamsForVnf"): deploy_params.update(parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())) - descriptor_config = get_vnf_configuration(vnfd) + descriptor_config = get_configuration(vnfd, vnfd["id"]) if descriptor_config: self._deploy_n2vc( logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index), @@ -1804,7 +1822,7 @@ class NsLcm(LcmBase): # Deploy charms for each VDU that supports one. 
for vdud in get_vdu_list(vnfd): vdu_id = vdud["id"] - descriptor_config = get_vdu_configuration(vnfd, vdu_id) + descriptor_config = get_configuration(vnfd, vdu_id) vdur = find_in_list(db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id) if vdur.get("additionalParams"): @@ -1843,7 +1861,7 @@ class NsLcm(LcmBase): ) for kdud in get_kdu_list(vnfd): kdu_name = kdud["name"] - descriptor_config = get_kdu_configuration(vnfd, kdu_name) + descriptor_config = get_configuration(vnfd, kdu_name) if descriptor_config: vdu_id = None vdu_index = 0 @@ -1885,7 +1903,7 @@ class NsLcm(LcmBase): vdu_name = None # Get additional parameters - deploy_params = {"OSM": get_osm_params(db_vnfr)} + deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}} if db_nsr.get("additionalParamsForNs"): deploy_params.update(parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())) base_folder = nsd["_admin"]["storage"] @@ -2034,7 +2052,7 @@ class NsLcm(LcmBase): if db_vnfd_list: for vnfd in db_vnfd_list: db_vnfd = self.db.get_one("vnfds", {"_id": vnfd}) - db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation')) + db_vnf_relations = get_configuration(db_vnfd, db_vnfd["id"]).get("relation", []) if db_vnf_relations: for r in db_vnf_relations: # check if this VCA is in the relation @@ -2174,7 +2192,12 @@ class NsLcm(LcmBase): "filter": {"_id": nsr_id}, "path": nsr_db_path} - kdu_instance = await self.k8scluster_map[k8sclustertype].install( + kdu_instance = self.k8scluster_map[k8sclustertype].generate_kdu_instance_name( + db_dict=db_dict_install, + kdu_model=k8s_instance_info["kdu-model"], + ) + self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}) + await self.k8scluster_map[k8sclustertype].install( cluster_uuid=k8s_instance_info["k8scluster-uuid"], kdu_model=k8s_instance_info["kdu-model"], atomic=True, @@ -2182,7 +2205,9 @@ class NsLcm(LcmBase): db_dict=db_dict_install, timeout=timeout, kdu_name=k8s_instance_info["kdu-name"], - namespace=k8s_instance_info["namespace"]) + namespace=k8s_instance_info["namespace"], + kdu_instance=kdu_instance, + ) self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance}) # Obtain services to obtain management service ip @@ -2219,8 +2244,9 @@ class NsLcm(LcmBase): vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY" self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict) - kdu_config = kdud.get("kdu-configuration") - if kdu_config and kdu_config.get("initial-config-primitive") and kdu_config.get("juju") is None: + kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"]) + if kdu_config and kdu_config.get("initial-config-primitive") and \ + get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None: initial_config_primitive_list = kdu_config.get("initial-config-primitive") initial_config_primitive_list.sort(key=lambda val: int(val["seq"])) @@ -2308,7 +2334,8 @@ class NsLcm(LcmBase): # Step 0: Prepare and set parameters desc_params = parse_yaml_strings(kdur.get("additionalParams")) vnfd_id = vnfr_data.get('vnfd-id') - kdud = next(kdud for kdud in db_vnfds[vnfd_id]["kdu"] if kdud["name"] == kdur["kdu-name"]) + vnfd_with_id = find_in_list(db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id) + kdud = next(kdud for kdud in vnfd_with_id["kdu"] if kdud["name"] == kdur["kdu-name"]) namespace = kdur.get("k8s-namespace") if kdur.get("helm-chart"): kdumodel = kdur["helm-chart"] @@ -2326,7 +2353,8 @@ class NsLcm(LcmBase): format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"])) # check if kdumodel is 
a file and exists try: - storage = deep_get(db_vnfds.get(vnfd_id), ('_admin', 'storage')) + vnfd_with_id = find_in_list(db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id) + storage = deep_get(vnfd_with_id, ('_admin', 'storage')) if storage and storage.get('pkg-dir'): # may be not present if vnfd has not artifacts # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype, @@ -2376,9 +2404,9 @@ class NsLcm(LcmBase): db_path = "_admin.deployed.K8s.{}".format(index) db_nsr_update[db_path] = k8s_instance_info self.update_db_2("nsrs", nsr_id, db_nsr_update) - + vnfd_with_id = find_in_list(db_vnfds, lambda vnf: vnf["_id"] == vnfd_id) task = asyncio.ensure_future( - self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdud, db_vnfds[vnfd_id], + self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdud, vnfd_with_id, k8s_instance_info, k8params=desc_params, timeout=600)) self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task) task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"]) @@ -2407,10 +2435,8 @@ class NsLcm(LcmBase): # fill db_nsr._admin.deployed.VCA. self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)) - if descriptor_config.get("juju"): # There is one execution envioronment of type juju - ee_list = [descriptor_config] - elif descriptor_config.get("execution-environment-list"): - ee_list = descriptor_config.get("execution-environment-list") + if "execution-environment-list" in descriptor_config: + ee_list = descriptor_config.get("execution-environment-list", []) else: # other types as script are not supported ee_list = [] @@ -2715,7 +2741,7 @@ class NsLcm(LcmBase): return vca["ee_id"] async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor, - vca_index, destroy_ee=True, exec_primitives=True): + vca_index, destroy_ee=True, exec_primitives=True, scaling_in=False): """ Execute the terminate primitives and destroy the execution environment (if destroy_ee=False :param logging_text: @@ -2726,6 +2752,7 @@ class NsLcm(LcmBase): :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has not executed properly + :param scaling_in: True destroys the application, False destroys the model :return: None or exception """ @@ -2783,7 +2810,7 @@ class NsLcm(LcmBase): await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"]) if destroy_ee: - await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"]) + await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"], scaling_in=scaling_in) async def _delete_all_N2VC(self, db_nsr: dict): self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING') @@ -3018,16 +3045,13 @@ class NsLcm(LcmBase): config_descriptor = db_nsr.get("ns-configuration") elif vca.get("vdu_id"): db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]] - vdud = next((vdu for vdu in db_vnfd.get("vdu", ()) if vdu["id"] == vca.get("vdu_id")), None) - if vdud: - config_descriptor = vdud.get("vdu-configuration") + config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id")) elif vca.get("kdu_name"): db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]] - kdud = next((kdu for kdu in db_vnfd.get("kdu", ()) if kdu["name"] == vca.get("kdu_name")), None) - if 
kdud: - config_descriptor = kdud.get("kdu-configuration") + config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name")) else: - config_descriptor = db_vnfds_from_member_index[vca["member-vnf-index"]].get("vnf-configuration") + db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]] + config_descriptor = get_configuration(db_vnfd, db_vnfd["id"]) vca_type = vca.get("type") exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and vca.get("needed_terminate")) @@ -3406,11 +3430,11 @@ class NsLcm(LcmBase): # look for primitive config_primitive_desc = descriptor_configuration = None if vdu_id: - descriptor_configuration = get_vdu_configuration(db_vnfd, vdu_id) + descriptor_configuration = get_configuration(db_vnfd, vdu_id) elif kdu_name: - descriptor_configuration = get_kdu_configuration(db_vnfd, kdu_name) + descriptor_configuration = get_configuration(db_vnfd, kdu_name) elif vnf_index: - descriptor_configuration = get_vnf_configuration(db_vnfd) + descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"]) else: descriptor_configuration = db_nsd.get("ns-configuration") @@ -3441,9 +3465,14 @@ class NsLcm(LcmBase): desc_params = parse_yaml_strings(db_vnfr.get("additionalParamsForVnf")) else: desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs")) - - if kdu_name and get_kdu_configuration(db_vnfd): - kdu_action = True if not get_kdu_configuration(db_vnfd)["juju"] else False + if kdu_name and get_configuration(db_vnfd, kdu_name): + kdu_configuration = get_configuration(db_vnfd, kdu_name) + actions = set() + for primitive in kdu_configuration.get("initial-config-primitive", []): + actions.add(primitive["name"]) + for primitive in kdu_configuration.get("config-primitive", []): + actions.add(primitive["name"]) + kdu_action = True if primitive_name in actions else False # TODO check if ns is in a proper status if kdu_name and (primitive_name in ("upgrade", "rollback", "status") or kdu_action): @@ -3587,6 +3616,7 @@ class NsLcm(LcmBase): logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id) stage = ['', '', ''] + tasks_dict_info = {} # ^ stage, step, VIM progress self.logger.debug(logging_text + "Enter") # get all needed from database @@ -3598,6 +3628,7 @@ class NsLcm(LcmBase): scale_process = None old_operational_status = "" old_config_status = "" + nsi_id = None try: # wait for any previous tasks in process step = "Waiting for previous operations to terminate" @@ -3627,7 +3658,6 @@ class NsLcm(LcmBase): # vdu_name = db_nslcmop["operationParams"].get("vdu_name") ####### - RO_nsr_id = nsr_deployed["RO"].get("nsr_id") vnf_index = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["member-vnf-index"] scaling_group = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"] scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"] @@ -3643,11 +3673,16 @@ class NsLcm(LcmBase): step = "Getting vnfd from database" db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]}) + base_folder = db_vnfd["_admin"]["storage"] + step = "Getting scaling-group-descriptor" - for scaling_descriptor in db_vnfd["scaling-group-descriptor"]: - if scaling_descriptor["name"] == scaling_group: - break - else: + scaling_descriptor = find_in_list( + get_scaling_aspect( + db_vnfd + ), + lambda scale_desc: scale_desc["name"] == scaling_group + ) + if not scaling_descriptor: raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present " "at 
vnfd:scaling-group-descriptor".format(scaling_group)) @@ -3666,65 +3701,116 @@ class NsLcm(LcmBase): admin_scale_index += 1 db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group RO_scaling_info = [] + VCA_scaling_info = [] vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []} if scaling_type == "SCALE_OUT": + if "aspect-delta-details" not in scaling_descriptor: + raise LcmException( + "Aspect delta details not fount in scaling descriptor {}".format( + scaling_descriptor["name"] + ) + ) # count if max-instance-count is reached - max_instance_count = scaling_descriptor.get("max-instance-count", 10) - # self.logger.debug("MAX_INSTANCE_COUNT is {}".format(max_instance_count)) - if nb_scale_op >= max_instance_count: - raise LcmException("reached the limit of {} (max-instance-count) " - "scaling-out operations for the " - "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group)) - - nb_scale_op += 1 + deltas = scaling_descriptor.get("aspect-delta-details")["deltas"] + vdu_scaling_info["scaling_direction"] = "OUT" vdu_scaling_info["vdu-create"] = {} - for vdu_scale_info in scaling_descriptor["vdu"]: - vdud = next(vdu for vdu in db_vnfd.get("vdu") if vdu["id"] == vdu_scale_info["vdu-id-ref"]) - vdu_index = len([x for x in db_vnfr.get("vdur", ()) - if x.get("vdu-id-ref") == vdu_scale_info["vdu-id-ref"] and - x.get("member-vnf-index-ref") == vnf_index]) - cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd) - if cloud_init_text: - additional_params = self._get_vdu_additional_params(db_vnfr, vdud["id"]) or {} - cloud_init_list = [] - for x in range(vdu_scale_info.get("count", 1)): + for delta in deltas: + for vdu_delta in delta["vdu-delta"]: + vdud = get_vdu(db_vnfd, vdu_delta["id"]) + vdu_index = get_vdur_index(db_vnfr, vdu_delta) + cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd) if cloud_init_text: - # TODO Information of its own ip is not available because db_vnfr is not updated. - additional_params["OSM"] = get_osm_params( - db_vnfr, - vdu_scale_info["vdu-id-ref"], - vdu_index + x + additional_params = self._get_vdu_additional_params(db_vnfr, vdud["id"]) or {} + cloud_init_list = [] + + vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"]) + max_instance_count = 10 + if vdu_profile and "max-number-of-instances" in vdu_profile: + max_instance_count = vdu_profile.get("max-number-of-instances", 10) + + default_instance_num = get_number_of_instances(db_vnfd, vdud["id"]) + + nb_scale_op += vdu_delta.get("number-of-instances", 1) + + if nb_scale_op + default_instance_num > max_instance_count: + raise LcmException( + "reached the limit of {} (max-instance-count) " + "scaling-out operations for the " + "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group) ) - cloud_init_list.append( - self._parse_cloud_init( - cloud_init_text, - additional_params, - db_vnfd["id"], - vdud["id"] + for x in range(vdu_delta.get("number-of-instances", 1)): + if cloud_init_text: + # TODO Information of its own ip is not available because db_vnfr is not updated. 
+ additional_params["OSM"] = get_osm_params( + db_vnfr, + vdu_delta["id"], + vdu_index + x ) - ) - RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index, - "type": "create", "count": vdu_scale_info.get("count", 1)}) - if cloud_init_list: - RO_scaling_info[-1]["cloud_init"] = cloud_init_list - vdu_scaling_info["vdu-create"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1) + cloud_init_list.append( + self._parse_cloud_init( + cloud_init_text, + additional_params, + db_vnfd["id"], + vdud["id"] + ) + ) + VCA_scaling_info.append( + { + "osm_vdu_id": vdu_delta["id"], + "member-vnf-index": vnf_index, + "type": "create", + "vdu_index": vdu_index + x + } + ) + RO_scaling_info.append( + { + "osm_vdu_id": vdu_delta["id"], + "member-vnf-index": vnf_index, + "type": "create", + "count": vdu_delta.get("number-of-instances", 1) + } + ) + if cloud_init_list: + RO_scaling_info[-1]["cloud_init"] = cloud_init_list + vdu_scaling_info["vdu-create"][vdu_delta["id"]] = vdu_delta.get("number-of-instances", 1) elif scaling_type == "SCALE_IN": - # count if min-instance-count is reached - min_instance_count = 0 if "min-instance-count" in scaling_descriptor and scaling_descriptor["min-instance-count"] is not None: min_instance_count = int(scaling_descriptor["min-instance-count"]) - if nb_scale_op <= min_instance_count: - raise LcmException("reached the limit of {} (min-instance-count) scaling-in operations for the " - "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group)) - nb_scale_op -= 1 + vdu_scaling_info["scaling_direction"] = "IN" vdu_scaling_info["vdu-delete"] = {} - for vdu_scale_info in scaling_descriptor["vdu"]: - RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index, - "type": "delete", "count": vdu_scale_info.get("count", 1)}) - vdu_scaling_info["vdu-delete"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1) + deltas = scaling_descriptor.get("aspect-delta-details")["deltas"] + for delta in deltas: + for vdu_delta in delta["vdu-delta"]: + vdu_index = get_vdur_index(db_vnfr, vdu_delta) + min_instance_count = 0 + vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"]) + if vdu_profile and "min-number-of-instances" in vdu_profile: + min_instance_count = vdu_profile["min-number-of-instances"] + + default_instance_num = get_number_of_instances(db_vnfd, vdu_delta["id"]) + + nb_scale_op -= vdu_delta.get("number-of-instances", 1) + if nb_scale_op + default_instance_num < min_instance_count: + raise LcmException( + "reached the limit of {} (min-instance-count) scaling-in operations for the " + "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group) + ) + RO_scaling_info.append({"osm_vdu_id": vdu_delta["id"], "member-vnf-index": vnf_index, + "type": "delete", "count": vdu_delta.get("number-of-instances", 1), + "vdu_index": vdu_index - 1}) + for x in range(vdu_delta.get("number-of-instances", 1)): + VCA_scaling_info.append( + { + "osm_vdu_id": vdu_delta["id"], + "member-vnf-index": vnf_index, + "type": "delete", + "vdu_index": vdu_index - 1 - x + } + ) + vdu_scaling_info["vdu-delete"][vdu_delta["id"]] = vdu_delta.get("number-of-instances", 1) # update VDU_SCALING_INFO with the VDUs to delete ip_addresses vdu_delete = copy(vdu_scaling_info.get("vdu-delete")) @@ -3756,7 +3842,9 @@ class NsLcm(LcmBase): "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive) # look for primitive - for config_primitive in db_vnfd.get("vnf-configuration", 
{}).get("config-primitive", ()): + for config_primitive in (get_configuration( + db_vnfd, db_vnfd["id"] + ) or {}).get("config-primitive", ()): if config_primitive["name"] == vnf_config_primitive: break else: @@ -3823,20 +3911,190 @@ class NsLcm(LcmBase): db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time() + # SCALE-IN VCA - BEGIN + if VCA_scaling_info: + step = db_nslcmop_update["detailed-status"] = \ + "Deleting the execution environments" + scale_process = "VCA" + for vdu_info in VCA_scaling_info: + if vdu_info["type"] == "delete": + member_vnf_index = str(vdu_info["member-vnf-index"]) + self.logger.debug(logging_text + "vdu info: {}".format(vdu_info)) + vdu_id = vdu_info["osm_vdu_id"] + vdu_index = int(vdu_info["vdu_index"]) + stage[1] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format( + member_vnf_index, vdu_id, vdu_index) + stage[2] = step = "Scaling in VCA" + self._write_op_status( + op_id=nslcmop_id, + stage=stage + ) + vca_update = db_nsr["_admin"]["deployed"]["VCA"] + config_update = db_nsr["configurationStatus"] + for vca_index, vca in enumerate(vca_update): + if (vca or vca.get("ee_id")) and vca["member-vnf-index"] == member_vnf_index and \ + vca["vdu_count_index"] == vdu_index: + if vca.get("vdu_id"): + config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id")) + elif vca.get("kdu_name"): + config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name")) + else: + config_descriptor = get_configuration(db_vnfd, db_vnfd["id"]) + operation_params = db_nslcmop.get("operationParams") or {} + exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and + vca.get("needed_terminate")) + task = asyncio.ensure_future(asyncio.wait_for( + self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, + vca_index, destroy_ee=True, + exec_primitives=exec_terminate_primitives, + scaling_in=True), timeout=self.timeout_charm_delete)) + # wait before next removal + await asyncio.sleep(30) + tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id")) + del vca_update[vca_index] + del config_update[vca_index] + # wait for pending tasks of terminate primitives + if tasks_dict_info: + self.logger.debug(logging_text + + 'Waiting for tasks {}'.format(list(tasks_dict_info.keys()))) + error_list = await self._wait_for_tasks(logging_text, tasks_dict_info, + min(self.timeout_charm_delete, + self.timeout_ns_terminate), + stage, nslcmop_id) + tasks_dict_info.clear() + if error_list: + raise LcmException("; ".join(error_list)) + + db_vca_and_config_update = { + "_admin.deployed.VCA": vca_update, + "configurationStatus": config_update + } + self.update_db_2("nsrs", db_nsr["_id"], db_vca_and_config_update) + scale_process = None + # SCALE-IN VCA - END + # SCALE RO - BEGIN if RO_scaling_info: scale_process = "RO" if self.ro_config.get("ng"): await self._scale_ng_ro(logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage) - else: - await self._RO_scale(logging_text, RO_nsr_id, RO_scaling_info, db_nslcmop, db_vnfr, - db_nslcmop_update, vdu_scaling_info) vdu_scaling_info.pop("vdu-create", None) vdu_scaling_info.pop("vdu-delete", None) scale_process = None if db_nsr_update: self.update_db_2("nsrs", nsr_id, db_nsr_update) + # SCALE RO - END + + # SCALE-UP VCA - BEGIN + if VCA_scaling_info: + step = db_nslcmop_update["detailed-status"] = \ + "Creating new execution environments" + scale_process = "VCA" + for vdu_info in 
VCA_scaling_info: + if vdu_info["type"] == "create": + member_vnf_index = str(vdu_info["member-vnf-index"]) + self.logger.debug(logging_text + "vdu info: {}".format(vdu_info)) + vnfd_id = db_vnfr["vnfd-ref"] + vdu_index = int(vdu_info["vdu_index"]) + deploy_params = {"OSM": get_osm_params(db_vnfr)} + if db_vnfr.get("additionalParamsForVnf"): + deploy_params.update(parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())) + descriptor_config = get_configuration(db_vnfd, db_vnfd["id"]) + if descriptor_config: + vdu_id = None + vdu_name = None + kdu_name = None + self._deploy_n2vc( + logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index), + db_nsr=db_nsr, + db_vnfr=db_vnfr, + nslcmop_id=nslcmop_id, + nsr_id=nsr_id, + nsi_id=nsi_id, + vnfd_id=vnfd_id, + vdu_id=vdu_id, + kdu_name=kdu_name, + member_vnf_index=member_vnf_index, + vdu_index=vdu_index, + vdu_name=vdu_name, + deploy_params=deploy_params, + descriptor_config=descriptor_config, + base_folder=base_folder, + task_instantiation_info=tasks_dict_info, + stage=stage + ) + vdu_id = vdu_info["osm_vdu_id"] + vdur = find_in_list(db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id) + descriptor_config = get_configuration(db_vnfd, vdu_id) + if vdur.get("additionalParams"): + deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"]) + else: + deploy_params_vdu = deploy_params + deploy_params_vdu["OSM"] = get_osm_params(db_vnfr, vdu_id, vdu_count_index=vdu_index) + if descriptor_config: + vdu_name = None + kdu_name = None + stage[1] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format( + member_vnf_index, vdu_id, vdu_index) + stage[2] = step = "Scaling out VCA" + self._write_op_status( + op_id=nslcmop_id, + stage=stage + ) + self._deploy_n2vc( + logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format( + member_vnf_index, vdu_id, vdu_index), + db_nsr=db_nsr, + db_vnfr=db_vnfr, + nslcmop_id=nslcmop_id, + nsr_id=nsr_id, + nsi_id=nsi_id, + vnfd_id=vnfd_id, + vdu_id=vdu_id, + kdu_name=kdu_name, + member_vnf_index=member_vnf_index, + vdu_index=vdu_index, + vdu_name=vdu_name, + deploy_params=deploy_params_vdu, + descriptor_config=descriptor_config, + base_folder=base_folder, + task_instantiation_info=tasks_dict_info, + stage=stage + ) + # TODO: scaling for kdu is not implemented yet. 
+ kdu_name = vdu_info["osm_vdu_id"] + descriptor_config = get_configuration(db_vnfd, kdu_name) + if descriptor_config: + vdu_id = None + vdu_index = vdu_index + vdu_name = None + kdur = next(x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name) + deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)} + if kdur.get("additionalParams"): + deploy_params_kdu = parse_yaml_strings(kdur["additionalParams"]) + + self._deploy_n2vc( + logging_text=logging_text, + db_nsr=db_nsr, + db_vnfr=db_vnfr, + nslcmop_id=nslcmop_id, + nsr_id=nsr_id, + nsi_id=nsi_id, + vnfd_id=vnfd_id, + vdu_id=vdu_id, + kdu_name=kdu_name, + member_vnf_index=member_vnf_index, + vdu_index=vdu_index, + vdu_name=vdu_name, + deploy_params=deploy_params_kdu, + descriptor_config=descriptor_config, + base_folder=base_folder, + task_instantiation_info=tasks_dict_info, + stage=stage + ) + # SCALE-UP VCA - END + scale_process = None # POST-SCALE BEGIN # execute primitive service POST-SCALING @@ -3854,7 +4112,9 @@ class NsLcm(LcmBase): vnfr_params.update(db_vnfr["additionalParamsForVnf"]) # look for primitive - for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()): + for config_primitive in ( + get_configuration(db_vnfd, db_vnfd["id"]) or {} + ).get("config-primitive", ()): if config_primitive["name"] == vnf_config_primitive: break else: @@ -3929,6 +4189,11 @@ class NsLcm(LcmBase): self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True) finally: self._write_ns_status(nsr_id=nsr_id, ns_state=None, current_operation="IDLE", current_operation_id=None) + if tasks_dict_info: + stage[1] = "Waiting for instantiate pending tasks." + self.logger.debug(logging_text + stage[1]) + exc = await self._wait_for_tasks(logging_text, tasks_dict_info, self.timeout_ns_deploy, + stage, nslcmop_id, nsr_id=nsr_id) if exc: db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc) nslcmop_operation_state = "FAILED" @@ -3969,18 +4234,17 @@ class NsLcm(LcmBase): db_vnfrs = {} # read from db: vnfd's for every vnf - db_vnfds = {} # every vnfd data indexed by vnf id - db_vnfds = {} + db_vnfds = [] # for each vnf in ns, read vnfd for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}): db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf # if we haven't this vnfd, read it from db - if vnfd_id not in db_vnfds: + if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id): # read from db vnfd = self.db.get_one("vnfds", {"_id": vnfd_id}) - db_vnfds[vnfd_id] = vnfd # vnfd's indexed by id + db_vnfds.append(vnfd) n2vc_key = self.n2vc.get_public_key() n2vc_key_list = [n2vc_key] self.scale_vnfr(db_vnfr, vdu_scaling_info.get("vdu-create"), vdu_scaling_info.get("vdu-delete"), @@ -3993,115 +4257,6 @@ class NsLcm(LcmBase): if vdu_scaling_info.get("vdu-delete"): self.scale_vnfr(db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False) - async def _RO_scale(self, logging_text, RO_nsr_id, RO_scaling_info, db_nslcmop, db_vnfr, db_nslcmop_update, - vdu_scaling_info): - nslcmop_id = db_nslcmop["_id"] - nsr_id = db_nslcmop["nsInstanceId"] - vdu_create = vdu_scaling_info.get("vdu-create") - vdu_delete = vdu_scaling_info.get("vdu-delete") - # Scale RO retry check: Check if this sub-operation has been executed before - op_index = self._check_or_add_scale_suboperation( - db_nslcmop, db_vnfr["member-vnf-index-ref"], None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info) - if op_index == 
self.SUBOPERATION_STATUS_SKIP: - # Skip sub-operation - result = 'COMPLETED' - result_detail = 'Done' - self.logger.debug(logging_text + "Skipped sub-operation RO, result {} {}".format(result, result_detail)) - else: - if op_index == self.SUBOPERATION_STATUS_NEW: - # New sub-operation: Get index of this sub-operation - op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1 - self.logger.debug(logging_text + "New sub-operation RO") - else: - # retry: Get registered params for this existing sub-operation - op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index] - RO_nsr_id = op.get('RO_nsr_id') - RO_scaling_info = op.get('RO_scaling_info') - self.logger.debug(logging_text + "Sub-operation RO retry") - - RO_desc = await self.RO.create_action("ns", RO_nsr_id, {"vdu-scaling": RO_scaling_info}) - # wait until ready - RO_nslcmop_id = RO_desc["instance_action_id"] - db_nslcmop_update["_admin.deploy.RO"] = RO_nslcmop_id - - RO_task_done = False - step = detailed_status = "Waiting for VIM to scale. RO_task_id={}.".format(RO_nslcmop_id) - detailed_status_old = None - self.logger.debug(logging_text + step) - - deployment_timeout = 1 * 3600 # One hour - while deployment_timeout > 0: - if not RO_task_done: - desc = await self.RO.show("ns", item_id_name=RO_nsr_id, extra_item="action", - extra_item_id=RO_nslcmop_id) - - # deploymentStatus - self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc) - - ns_status, ns_status_info = self.RO.check_action_status(desc) - if ns_status == "ERROR": - raise ROclient.ROClientException(ns_status_info) - elif ns_status == "BUILD": - detailed_status = step + "; {}".format(ns_status_info) - elif ns_status == "ACTIVE": - RO_task_done = True - self.scale_vnfr(db_vnfr, vdu_create=vdu_create, vdu_delete=vdu_delete) - step = detailed_status = "Waiting ns ready at RO. RO_id={}".format(RO_nsr_id) - self.logger.debug(logging_text + step) - else: - assert False, "ROclient.check_action_status returns unknown {}".format(ns_status) - else: - desc = await self.RO.show("ns", RO_nsr_id) - ns_status, ns_status_info = self.RO.check_ns_status(desc) - # deploymentStatus - self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc) - - if ns_status == "ERROR": - raise ROclient.ROClientException(ns_status_info) - elif ns_status == "BUILD": - detailed_status = step + "; {}".format(ns_status_info) - elif ns_status == "ACTIVE": - step = detailed_status = \ - "Waiting for management IP address reported by the VIM. 
Updating VNFRs" - try: - # nsr_deployed["nsr_ip"] = RO.get_ns_vnf_info(desc) - self.ns_update_vnfr({db_vnfr["member-vnf-index-ref"]: db_vnfr}, desc) - break - except LcmExceptionNoMgmtIP: - pass - else: - assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status) - if detailed_status != detailed_status_old: - self._update_suboperation_status( - db_nslcmop, op_index, 'COMPLETED', detailed_status) - detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status - self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update) - - await asyncio.sleep(5, loop=self.loop) - deployment_timeout -= 5 - if deployment_timeout <= 0: - self._update_suboperation_status( - db_nslcmop, nslcmop_id, op_index, 'FAILED', "Timeout when waiting for ns to get ready") - raise ROclient.ROClientException("Timeout waiting ns to be ready") - - # update VDU_SCALING_INFO with the obtained ip_addresses - if vdu_scaling_info["scaling_direction"] == "OUT": - for vdur in reversed(db_vnfr["vdur"]): - if vdu_scaling_info["vdu-create"].get(vdur["vdu-id-ref"]): - vdu_scaling_info["vdu-create"][vdur["vdu-id-ref"]] -= 1 - vdu_scaling_info["vdu"].append({ - "name": vdur["name"] or vdur.get("vdu-name"), - "vdu_id": vdur["vdu-id-ref"], - "interface": [] - }) - for interface in vdur["interfaces"]: - vdu_scaling_info["vdu"][-1]["interface"].append({ - "name": interface["name"], - "ip_address": interface["ip-address"], - "mac_address": interface.get("mac-address"), - }) - self._update_suboperation_status(db_nslcmop, op_index, 'COMPLETED', 'Done') - async def add_prometheus_metrics(self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip): if not self.prometheus: return