X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_lcm%2Fns.py;h=763d2390acf8fbae9b780dd06a7b4882ce53db20;hb=3149f26911d1c239f674d5918f67ae802b0e671c;hp=c4488230f286bf1dcdb735bb0adec73e015c0f2b;hpb=a508819e00e382aca98e2ead4e8aad9cf6b8b950;p=osm%2FLCM.git diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py index c448823..763d239 100644 --- a/osm_lcm/ns.py +++ b/osm_lcm/ns.py @@ -24,7 +24,7 @@ import traceback from jinja2 import Environment, Template, meta, TemplateError, TemplateNotFound, TemplateSyntaxError from osm_lcm import ROclient -from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase +from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get from n2vc.k8s_helm_conn import K8sHelmConnector from osm_common.dbbase import DbException @@ -68,21 +68,6 @@ def populate_dict(target_dict, key_list, value): target_dict[key_list[-1]] = value -def deep_get(target_dict, key_list): - """ - Get a value from target_dict entering in the nested keys. If keys does not exist, it returns None - Example target_dict={a: {b: 5}}; key_list=[a,b] returns 5; both key_list=[a,b,c] and key_list=[f,h] return None - :param target_dict: dictionary to be read - :param key_list: list of keys to read from target_dict - :return: The wanted value if exist, None otherwise - """ - for key in key_list: - if not isinstance(target_dict, dict) or key not in target_dict: - return None - target_dict = target_dict[key] - return target_dict - - class NsLcm(LcmBase): timeout_vca_on_error = 5 * 60 # Time for charm from first time at blocked,error status to mark as failed total_deploy_timeout = 2 * 3600 # global timeout for deployment @@ -114,6 +99,8 @@ class NsLcm(LcmBase): self.vca_config['public_key'] = self.vca_config['pubkey'] if 'cacert' in self.vca_config: self.vca_config['ca_cert'] = self.vca_config['cacert'] + if 'apiproxy' in self.vca_config: + self.vca_config['api_proxy'] = self.vca_config['apiproxy'] # create N2VC connector self.n2vc = N2VCJujuConnector( @@ -124,10 +111,9 @@ class NsLcm(LcmBase): url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']), username=self.vca_config.get('user', None), vca_config=self.vca_config, - on_update_db=self._on_update_n2vc_db - # TODO - # New N2VC argument - # api_proxy=vca_config.get('apiproxy') + on_update_db=self._on_update_n2vc_db, + # ca_cert=self.vca_config.get('cacert'), + # api_proxy=self.vca_config.get('apiproxy'), ) self.k8sclusterhelm = K8sHelmConnector( @@ -885,9 +871,14 @@ class NsLcm(LcmBase): ip_address = None nb_tries = 0 target_vdu_id = None + ro_retries = 0 while True: + ro_retries += 1 + if ro_retries >= 360: # 1 hour + raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)) + await asyncio.sleep(10, loop=self.loop) # wait until NS is deployed at RO if not ro_nsr_id: @@ -899,23 +890,29 @@ class NsLcm(LcmBase): # get ip address if not target_vdu_id: db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id}) - if not vdu_id: + + if not vdu_id: # for the VNF case ip_address = db_vnfr.get("ip-address") if not ip_address: continue - for vdur in get_iterable(db_vnfr, "vdur"): - if (vdur["vdu-id-ref"] == vdu_id and vdur["count-index"] == vdu_index) or \ - (ip_address and vdur.get("ip-address") == ip_address): - if vdur["status"] == "ACTIVE": - target_vdu_id = vdur["vdu-id-ref"] - elif vdur["status"] == "ERROR": - raise LcmException("Cannot inject ssh-key because target VM is in error state") - break - else: + vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == 
+                                             ip_address), None)
+            else:  # VDU case
+                vdur = next((x for x in get_iterable(db_vnfr, "vdur")
+                             if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)
+
+            if not vdur:
                 raise LcmException("Not found vnfr_id={}, vdu_index={}, vdu_index={}".format(
                     vnfr_id, vdu_id, vdu_index
                 ))
+            if vdur.get("status") == "ACTIVE":
+                ip_address = vdur.get("ip-address")
+                if not ip_address:
+                    continue
+                target_vdu_id = vdur["vdu-id-ref"]
+            elif vdur.get("status") == "ERROR":
+                raise LcmException("Cannot inject ssh-key because target VM is in error state")
+
             if not target_vdu_id:
                 continue
 
@@ -1001,35 +998,35 @@ class NsLcm(LcmBase):
             if is_proxy_charm:
                 step = "create execution environment"
                 self.logger.debug(logging_text + step)
-                ee_id, credentials = await self.n2vc.create_execution_environment(
-                    namespace=namespace,
-                    reuse_ee_id=ee_id,
-                    db_dict=db_dict
-                )
-
+                ee_id, credentials = await self.n2vc.create_execution_environment(namespace=namespace,
+                                                                                   reuse_ee_id=ee_id,
+                                                                                   db_dict=db_dict)
             else:
-                step = "register execution environment"
-                # TODO wait until deployed by RO, when IP address has been filled. By pooling????
-                credentials = {}   # TODO db_credentials["ip_address"]
+                step = "Waiting to VM being up and getting IP address"
+                self.logger.debug(logging_text + step)
+                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
+                                                                 user=None, pub_key=None)
+                credentials = {"hostname": rw_mgmt_ip}
                 # get username
+                username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
                 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                 # merged. Meanwhile let's get username from initial-config-primitive
-                if config_descriptor.get("initial-config-primitive"):
-                    for param in config_descriptor["initial-config-primitive"][0].get("parameter", ()):
-                        if param["name"] == "ssh-username":
-                            credentials["username"] = param["value"]
-                if config_descriptor.get("config-access") and config_descriptor["config-access"].get("ssh-access"):
-                    if config_descriptor["config-access"]["ssh-access"].get("required"):
-                        credentials["username"] = \
-                            config_descriptor["config-access"]["ssh-access"].get("default-user")
-
+                if not username and config_descriptor.get("initial-config-primitive"):
+                    for config_primitive in config_descriptor["initial-config-primitive"]:
+                        for param in config_primitive.get("parameter", ()):
+                            if param["name"] == "ssh-username":
+                                username = param["value"]
+                                break
+                if not username:
+                    raise LcmException("Cannot determine the username neither with 'initial-config-primitive' nor with "
+                                       "'config-access.ssh-access.default-user'")
+                credentials["username"] = username
             # n2vc_redesign STEP 3.2
+
+            step = "register execution environment {}".format(credentials)
             self.logger.debug(logging_text + step)
-            ee_id = await self.n2vc.register_execution_environment(
-                credentials=credentials,
-                namespace=namespace,
-                db_dict=db_dict
-            )
+            ee_id = await self.n2vc.register_execution_environment(credentials=credentials, namespace=namespace,
+                                                                   db_dict=db_dict)
 
             # for compatibility with MON/POL modules, the need model and application name at database
             # TODO ask to N2VC instead of assuming the format "model_name.application_name"
@@ -1041,44 +1038,33 @@ class NsLcm(LcmBase):
                          db_update_entry + "ee_id": ee_id})
 
             # n2vc_redesign STEP 3.3
-            # TODO check if already done
+
             step = "Install configuration Software"
+            # TODO check if already done
             self.logger.debug(logging_text + step)
-            await self.n2vc.install_configuration_sw(
ee_id=ee_id, - artifact_path=artifact_path, - db_dict=db_dict - ) + await self.n2vc.install_configuration_sw(ee_id=ee_id, artifact_path=artifact_path, db_dict=db_dict) # if SSH access is required, then get execution environment SSH public - required = deep_get(config_descriptor, ("config-access", "ssh-access", "required")) - pub_key = None - user = None - if is_proxy_charm and required: - user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user")) - step = "Install configuration Software, getting public ssh key" - pub_key = await self.n2vc.get_ee_ssh_public__key( - ee_id=ee_id, - db_dict=db_dict - ) + if is_proxy_charm: # if native charm we have waited already to VM be UP + pub_key = None + user = None + if deep_get(config_descriptor, ("config-access", "ssh-access", "required")): + # Needed to inject a ssh key + user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user")) + step = "Install configuration Software, getting public ssh key" + pub_key = await self.n2vc.get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict) + + step = "Insert public key into VM" + else: + step = "Waiting to VM being up and getting IP address" + self.logger.debug(logging_text + step) - step = "Insert public key into VM" - else: - step = "Waiting to VM being up and getting IP address" - self.logger.debug(logging_text + step) + # n2vc_redesign STEP 5.1 + # wait for RO (ip-address) Insert pub_key into VM + rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, + user=user, pub_key=pub_key) - # n2vc_redesign STEP 5.1 - # wait for RO (ip-address) Insert pub_key into VM - rw_mgmt_ip = await self.wait_vm_up_insert_key_ro( - logging_text=logging_text, - nsr_id=nsr_id, - vnfr_id=vnfr_id, - vdu_id=vdu_id, - vdu_index=vdu_index, - user=user, - pub_key=pub_key - ) - self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip)) + self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip)) # store rw_mgmt_ip in deploy params for later replacement deploy_params["rw_mgmt_ip"] = rw_mgmt_ip @@ -1103,6 +1089,7 @@ class NsLcm(LcmBase): deploy_params["ns_config_info"] = self._get_ns_config_info(vca_deployed_list) # TODO check if already done primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params) + step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_) self.logger.debug(logging_text + step) await self.n2vc.exec_primitive( @@ -1230,25 +1217,20 @@ class NsLcm(LcmBase): db_nsr_update["_admin.nsState"] = "INSTANTIATED" self.update_db_2("nsrs", nsr_id, db_nsr_update) self.logger.debug(logging_text + "Before deploy_kdus") - db_k8scluster_list = self.db.get_list("k8sclusters", {}) # Call to deploy_kdus in case exists the "vdu:kdu" param task_kdu = asyncio.ensure_future( self.deploy_kdus( logging_text=logging_text, nsr_id=nsr_id, - nsd=nsd, db_nsr=db_nsr, - db_nslcmop=db_nslcmop, db_vnfrs=db_vnfrs, - db_vnfds_ref=db_vnfds_ref, - db_k8scluster=db_k8scluster_list ) ) self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDUs", task_kdu) task_instantiation_list.append(task_kdu) # n2vc_redesign STEP 1 Get VCA public ssh-key # feature 1429. 
Add n2vc public key to needed VMs - n2vc_key = await self.n2vc.get_public_key() + n2vc_key = self.n2vc.get_public_key() n2vc_key_list = [n2vc_key] if self.vca_config.get("public_key"): n2vc_key_list.append(self.vca_config["public_key"]) @@ -1289,10 +1271,7 @@ class NsLcm(LcmBase): # Get additional parameters deploy_params = {} if db_vnfr.get("additionalParamsForVnf"): - deploy_params = db_vnfr["additionalParamsForVnf"].copy() - for k, v in deploy_params.items(): - if isinstance(v, str) and v.startswith("!!yaml "): - deploy_params[k] = yaml.safe_load(v[7:]) + deploy_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"].copy()) descriptor_config = vnfd.get("vnf-configuration") if descriptor_config and descriptor_config.get("juju"): @@ -1319,6 +1298,11 @@ class NsLcm(LcmBase): for vdud in get_iterable(vnfd, 'vdu'): vdu_id = vdud["id"] descriptor_config = vdud.get('vdu-configuration') + vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None) + if vdur.get("additionalParams"): + deploy_params_vdu = self._format_additional_params(vdur["additionalParams"]) + else: + deploy_params_vdu = deploy_params if descriptor_config and descriptor_config.get("juju"): # look for vdu index in the db_vnfr["vdu"] section # for vdur_index, vdur in enumerate(db_vnfr["vdur"]): @@ -1345,7 +1329,7 @@ class NsLcm(LcmBase): member_vnf_index=member_vnf_index, vdu_index=vdu_index, vdu_name=vdu_name, - deploy_params=deploy_params, + deploy_params=deploy_params_vdu, descriptor_config=descriptor_config, base_folder=base_folder, task_instantiation_list=task_instantiation_list @@ -1400,10 +1384,7 @@ class NsLcm(LcmBase): # Get additional parameters deploy_params = {} if db_nsr.get("additionalParamsForNs"): - deploy_params = db_nsr["additionalParamsForNs"].copy() - for k, v in deploy_params.items(): - if isinstance(v, str) and v.startswith("!!yaml "): - deploy_params[k] = yaml.safe_load(v[7:]) + deploy_params = self._format_additional_params(db_nsr["additionalParamsForNs"].copy()) base_folder = nsd["_admin"]["storage"] self._deploy_n2vc( logging_text=logging_text, @@ -1500,97 +1481,98 @@ class NsLcm(LcmBase): self.logger.debug(logging_text + "Exit") self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate") - async def deploy_kdus(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref, db_k8scluster): + async def deploy_kdus(self, logging_text, nsr_id, db_nsr, db_vnfrs): # Launch kdus if present in the descriptor - logging_text = "Deploy kdus: " - db_nsr_update = {} - db_nsr_update["_admin.deployed.K8s"] = [] - try: - # Look for all vnfds - # db_nsr_update["_admin.deployed.K8s"] = [] - vnf_update = [] - task_list = [] - for c_vnf in nsd.get("constituent-vnfd", ()): - vnfr = db_vnfrs[c_vnf["member-vnf-index"]] - member_vnf_index = c_vnf["member-vnf-index"] - vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']] - vnfd_ref = vnfd["id"] - desc_params = {} - step = "Checking kdu from vnf: {} - member-vnf-index: {}".format(vnfd_ref, member_vnf_index) - self.logger.debug(logging_text + step) - if vnfd.get("kdu"): - step = "vnf: {} has kdus".format(vnfd_ref) - self.logger.debug(logging_text + step) - for vnfr_name, vnfr_data in db_vnfrs.items(): - if vnfr_data["vnfd-ref"] == vnfd["id"]: - if vnfr_data.get("additionalParamsForVnf"): - desc_params = self._format_additional_params(vnfr_data["additionalParamsForVnf"]) - break - else: - raise LcmException("VNF descriptor not found with id: {}".format(vnfr_data["vnfd-ref"])) - self.logger.debug(logging_text + step) + 
+        k8scluster_id_2_uuic = {"helm-chart": {}, "juju-bundle": {}}
 
-                    for kdur in vnfr.get("kdur"):
-                        index = 0
-                        for k8scluster in db_k8scluster:
-                            if kdur["k8s-cluster"]["id"] == k8scluster["_id"]:
-                                cluster_uuid = k8scluster["cluster-uuid"]
-                                break
-                        else:
-                            raise LcmException("K8scluster not found with id: {}".format(kdur["k8s-cluster"]["id"]))
-                        self.logger.debug(logging_text + step)
+        def _get_cluster_id(cluster_id, cluster_type):
+            nonlocal k8scluster_id_2_uuic
+            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
+                return k8scluster_id_2_uuic[cluster_type][cluster_id]
 
-                        step = "Instantiate KDU {} in k8s cluster {}".format(kdur["kdu-name"], cluster_uuid)
-                        self.logger.debug(logging_text + step)
-                        for kdu in vnfd.get("kdu"):
-                            if kdu.get("name") == kdur["kdu-name"]:
-                                break
-                        else:
-                            raise LcmException("KDU not found with name: {} in VNFD {}".format(kdur["kdu-name"],
-                                                                                               vnfd["name"]))
-                        self.logger.debug(logging_text + step)
-                        kdumodel = None
-                        k8sclustertype = None
-                        if kdu.get("helm-chart"):
-                            kdumodel = kdu["helm-chart"]
-                            k8sclustertype = "chart"
-                        elif kdu.get("juju-bundle"):
-                            kdumodel = kdu["juju-bundle"]
-                            k8sclustertype = "juju"
-                        k8s_instace_info = {"kdu-instance": None, "k8scluster-uuid": cluster_uuid,
-                                            "vnfr-id": vnfr["id"], "k8scluster-type": k8sclustertype,
-                                            "kdu-name": kdur["kdu-name"], "kdu-model": kdumodel}
-                        db_nsr_update["_admin.deployed.K8s"].append(k8s_instace_info)
-                        db_dict = {"collection": "nsrs", "filter": {"_id": nsr_id}, "path": "_admin.deployed.K8s."
-                                                                                            "{}".format(index)}
-                        if k8sclustertype == "chart":
-                            task = self.k8sclusterhelm.install(cluster_uuid=cluster_uuid, kdu_model=kdumodel,
-                                                               atomic=True, params=desc_params,
-                                                               db_dict=db_dict, timeout=300)
-                        else:
-                            # TODO I need the juju connector in place
-                            pass
-                        task_list.append(task)
-                        index += 1
+            db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
+            if not db_k8scluster:
+                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
+            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
+            if not k8s_id:
+                raise LcmException("K8s cluster '{}' has not been initialized for '{}'".format(cluster_id, cluster_type))
+            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
+            return k8s_id
+
+        logging_text += "Deploy kdus: "
+        try:
+            db_nsr_update = {"_admin.deployed.K8s": []}
             self.update_db_2("nsrs", nsr_id, db_nsr_update)
 
-            done = None
-            pending = None
-            if len(task_list) > 0:
-                self.logger.debug('Waiting for terminate pending tasks...')
-                done, pending = await asyncio.wait(task_list, timeout=3600)
-                if not pending:
-                    for fut in done:
-                        k8s_instance = fut.result()
-                        k8s_instace_info = {"kdu-instance": k8s_instance, "k8scluster-uuid": cluster_uuid,
-                                            "vnfr-id": vnfr["id"], "k8scluster-type": k8sclustertype,
-                                            "kdu-name": kdur["kdu-name"], "kdu-model": kdumodel}
-                        vnf_update.append(k8s_instace_info)
-                    self.logger.debug('All tasks finished...')
-                else:
-                    self.logger.info('There are pending tasks: {}'.format(pending))
-            db_nsr_update["_admin.deployed.K8s"] = vnf_update
+            # Look for all vnfds
+            pending_tasks = {}
+            index = 0
+            for vnfr_data in db_vnfrs.values():
+                for kdur in get_iterable(vnfr_data, "kdur"):
+                    desc_params = self._format_additional_params(kdur.get("additionalParams"))
+                    kdumodel = None
+                    k8sclustertype = None
+                    error_text = None
+                    cluster_uuid = None
+                    if kdur.get("helm-chart"):
+                        kdumodel = kdur["helm-chart"]
+                        k8sclustertype = "chart"
+                        k8sclustertype_full = "helm-chart"
+                    elif kdur.get("juju-bundle"):
+                        kdumodel = kdur["juju-bundle"]
k8sclustertype = "juju" + k8sclustertype_full = "juju-bundle" + else: + error_text = "kdu type is neither helm-chart not juju-bundle. Maybe an old NBI version is" \ + " running" + try: + if not error_text: + cluster_uuid = _get_cluster_id(kdur["k8s-cluster"]["id"], k8sclustertype_full) + except LcmException as e: + error_text = str(e) + step = "Instantiate KDU {} in k8s cluster {}".format(kdur["kdu-name"], cluster_uuid) + + k8s_instace_info = {"kdu-instance": None, "k8scluster-uuid": cluster_uuid, + "k8scluster-type": k8sclustertype, + "kdu-name": kdur["kdu-name"], "kdu-model": kdumodel} + if error_text: + k8s_instace_info["detailed-status"] = error_text + db_nsr_update["_admin.deployed.K8s.{}".format(index)] = k8s_instace_info + self.update_db_2("nsrs", nsr_id, db_nsr_update) + if error_text: + continue + + db_dict = {"collection": "nsrs", "filter": {"_id": nsr_id}, "path": "_admin.deployed.K8s." + "{}".format(index)} + if k8sclustertype == "chart": + task = asyncio.ensure_future( + self.k8sclusterhelm.install(cluster_uuid=cluster_uuid, kdu_model=kdumodel, atomic=True, + params=desc_params, db_dict=db_dict, timeout=3600) + ) + else: + # TODO juju-bundle connector in place + pass + pending_tasks[task] = "_admin.deployed.K8s.{}.".format(index) + index += 1 + if not pending_tasks: + return + self.logger.debug(logging_text + 'Waiting for terminate pending tasks...') + pending_list = list(pending_tasks.keys()) + while pending_list: + done_list, pending_list = await asyncio.wait(pending_list, timeout=30*60, + return_when=asyncio.FIRST_COMPLETED) + if not done_list: # timeout + for task in pending_list: + db_nsr_update[pending_tasks(task) + "detailed-status"] = "Timeout" + break + for task in done_list: + exc = task.exception() + if exc: + db_nsr_update[pending_tasks[task] + "detailed-status"] = "{}".format(exc) + else: + db_nsr_update[pending_tasks[task] + "kdu-instance"] = task.result() + except Exception as e: self.logger.critical(logging_text + "Exit Exception {} while '{}': {}".format(type(e).__name__, step, e)) raise LcmException("{} Exit Exception {} while '{}': {}".format(logging_text, type(e).__name__, step, e)) @@ -1705,11 +1687,10 @@ class NsLcm(LcmBase): return nslcmop def _format_additional_params(self, params): - + params = params or {} for key, value in params.items(): if str(value).startswith("!!yaml "): params[key] = yaml.safe_load(value[7:]) - return params def _get_terminate_primitive_params(self, seq, vnf_index): @@ -2003,19 +1984,23 @@ class NsLcm(LcmBase): # Delete from k8scluster step = "delete kdus" self.logger.debug(logging_text + step) - print(nsr_deployed) + # print(nsr_deployed) if nsr_deployed: - for kdu in nsr_deployed.get("K8s"): + for kdu in nsr_deployed.get("K8s", ()): + kdu_instance = kdu.get("kdu-instance") + if not kdu_instance: + continue if kdu.get("k8scluster-type") == "chart": - task_delete_kdu_instance = asyncio.ensure_future(self.k8sclusterhelm.uninstall( - cluster_uuid=kdu.get("k8scluster-uuid"), kdu_instance=kdu.get("kdu-instance"))) + task_delete_kdu_instance = asyncio.ensure_future( + self.k8sclusterhelm.uninstall(cluster_uuid=kdu.get("k8scluster-uuid"), + kdu_instance=kdu_instance)) elif kdu.get("k8scluster-type") == "juju": # TODO Juju connector needed - pass + continue else: - msg = "k8scluster-type not defined" - raise LcmException(msg) - + self.error(logging_text + "Unknown k8s deployment type {}". 
+                                          format(kdu.get("k8scluster-type")))
+                        continue
                 pending_tasks.append(task_delete_kdu_instance)
         except LcmException as e:
             msg = "Failed while deleting KDUs from NS: {}".format(e)
@@ -2365,14 +2350,10 @@ class NsLcm(LcmBase):
                     break
             elif kdu_name:
                 self.logger.debug(logging_text + "Checking actions in KDUs")
-                desc_params = {}
-                if vnf_index:
-                    if db_vnfr.get("additionalParamsForVnf") and db_vnfr["additionalParamsForVnf"].\
-                            get("member-vnf-index") == vnf_index:
-                        desc_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"].
-                                                                     get("additionalParams"))
-                if primitive_params:
-                    desc_params.update(primitive_params)
+                kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None)
+                desc_params = self._format_additional_params(kdur.get("additionalParams")) or {}
+                if primitive_params:
+                    desc_params.update(primitive_params)
                 # TODO Check if we will need something at vnf level
                 index = 0
                 for kdu in get_iterable(nsr_deployed, "K8s"):
@@ -2458,10 +2439,14 @@ class NsLcm(LcmBase):
             desc_params = {}
             if vnf_index:
                 if db_vnfr.get("additionalParamsForVnf"):
-                    desc_params.update(db_vnfr["additionalParamsForVnf"])
+                    desc_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"])
+                if vdu_id:
+                    vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
+                    if vdur.get("additionalParams"):
+                        desc_params = self._format_additional_params(vdur["additionalParams"])
             else:
                 if db_nsr.get("additionalParamsForNs"):
-                    desc_params.update(db_nsr["additionalParamsForNs"])
+                    desc_params.update(self._format_additional_params(db_nsr["additionalParamsForNs"]))
 
             # TODO check if ns is in a proper status
             output, detail = await self._ns_execute_primitive(