+ step = "create execution environment"
+ self.logger.debug(logging_text + step)
+ ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
+ namespace=namespace,
+ reuse_ee_id=ee_id,
+ db_dict=db_dict,
+ config=osm_config,
+ artifact_path=artifact_path,
+ vca_type=vca_type)
+
+ elif vca_type == "native_charm":
+ step = "Waiting to VM being up and getting IP address"
+ self.logger.debug(logging_text + step)
+ rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
+ user=None, pub_key=None)
+ credentials = {"hostname": rw_mgmt_ip}
+ # get username
+ username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
+ # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
+ # merged. Meanwhile let's get username from initial-config-primitive
+ if not username and initial_config_primitive_list:
+ for config_primitive in initial_config_primitive_list:
+ for param in config_primitive.get("parameter", ()):
+ if param["name"] == "ssh-username":
+ username = param["value"]
+ break
+ if not username:
+ raise LcmException("Cannot determine the username neither with 'initial-config-primitive' nor with "
+ "'config-access.ssh-access.default-user'")
+ credentials["username"] = username
+ # n2vc_redesign STEP 3.2
+
+ self._write_configuration_status(
+ nsr_id=nsr_id,
+ vca_index=vca_index,
+ status='REGISTERING',
+ element_under_configuration=element_under_configuration,
+ element_type=element_type
+ )
+
+ step = "register execution environment {}".format(credentials)
+ self.logger.debug(logging_text + step)
+ ee_id = await self.vca_map[vca_type].register_execution_environment(
+ credentials=credentials, namespace=namespace, db_dict=db_dict)
+
+ # for compatibility with MON/POL modules, the need model and application name at database
+ # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
+ ee_id_parts = ee_id.split('.')
+ db_nsr_update = {db_update_entry + "ee_id": ee_id}
+ if len(ee_id_parts) >= 2:
+ model_name = ee_id_parts[0]
+ application_name = ee_id_parts[1]
+ db_nsr_update[db_update_entry + "model"] = model_name
+ db_nsr_update[db_update_entry + "application"] = application_name
+
+ # n2vc_redesign STEP 3.3
+ step = "Install configuration Software"
+
+ self._write_configuration_status(
+ nsr_id=nsr_id,
+ vca_index=vca_index,
+ status='INSTALLING SW',
+ element_under_configuration=element_under_configuration,
+ element_type=element_type,
+ other_update=db_nsr_update
+ )
+
+ # TODO check if already done
+ self.logger.debug(logging_text + step)
+ config = None
+ if vca_type == "native_charm":
+ config_primitive = next((p for p in initial_config_primitive_list if p["name"] == "config"), None)
+ if config_primitive:
+ config = self._map_primitive_params(
+ config_primitive,
+ {},
+ deploy_params
+ )
+ num_units = 1
+ if vca_type == "lxc_proxy_charm":
+ if element_type == "NS":
+ num_units = db_nsr.get("config-units") or 1
+ elif element_type == "VNF":
+ num_units = db_vnfr.get("config-units") or 1
+ elif element_type == "VDU":
+ for v in db_vnfr["vdur"]:
+ if vdu_id == v["vdu-id-ref"]:
+ num_units = v.get("config-units") or 1
+ break
+
+ await self.vca_map[vca_type].install_configuration_sw(
+ ee_id=ee_id,
+ artifact_path=artifact_path,
+ db_dict=db_dict,
+ config=config,
+ num_units=num_units,
+ vca_type=vca_type
+ )
+
+ # write in db flag of configuration_sw already installed
+ self.update_db_2("nsrs", nsr_id, {db_update_entry + "config_sw_installed": True})
+
+ # add relations for this VCA (wait for other peers related with this VCA)
+ await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id,
+ vca_index=vca_index, vca_type=vca_type)
+
+ # if SSH access is required, then get execution environment SSH public
+ if vca_type in ("lxc_proxy_charm", "helm"): # if native charm we have waited already to VM be UP
+ pub_key = None
+ user = None
+ # self.logger.debug("get ssh key block")
+ if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
+ # self.logger.debug("ssh key needed")
+ # Needed to inject a ssh key
+ user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
+ step = "Install configuration Software, getting public ssh key"
+ pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)
+
+ step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
+ else:
+ # self.logger.debug("no need to get ssh key")
+ step = "Waiting to VM being up and getting IP address"
+ self.logger.debug(logging_text + step)
+
+ # n2vc_redesign STEP 5.1
+ # wait for RO (ip-address) Insert pub_key into VM
+ if vnfr_id:
+ rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
+ user=user, pub_key=pub_key)
+ else:
+ rw_mgmt_ip = None # This is for a NS configuration
+
+ self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))
+
+ # store rw_mgmt_ip in deploy params for later replacement
+ deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
+
+ # n2vc_redesign STEP 6 Execute initial config primitive
+ step = 'execute initial config primitive'
+
+ # wait for dependent primitives execution (NS -> VNF -> VDU)
+ if initial_config_primitive_list:
+ await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
+
+ # stage, in function of element type: vdu, kdu, vnf or ns
+ my_vca = vca_deployed_list[vca_index]
+ if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
+ # VDU or KDU
+ stage[0] = 'Stage 3/5: running Day-1 primitives for VDU.'
+ elif my_vca.get("member-vnf-index"):
+ # VNF
+ stage[0] = 'Stage 4/5: running Day-1 primitives for VNF.'
+ else:
+ # NS
+ stage[0] = 'Stage 5/5: running Day-1 primitives for NS.'
+
+ self._write_configuration_status(
+ nsr_id=nsr_id,
+ vca_index=vca_index,
+ status='EXECUTING PRIMITIVE'
+ )
+
+ self._write_op_status(
+ op_id=nslcmop_id,
+ stage=stage
+ )
+
+ check_if_terminated_needed = True
+ for initial_config_primitive in initial_config_primitive_list:
+ # adding information on the vca_deployed if it is a NS execution environment
+ if not vca_deployed["member-vnf-index"]:
+ deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id))
+ # TODO check if already done
+ primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)
+
+ step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
+ self.logger.debug(logging_text + step)
+ await self.vca_map[vca_type].exec_primitive(
+ ee_id=ee_id,
+ primitive_name=initial_config_primitive["name"],
+ params_dict=primitive_params_,
+ db_dict=db_dict
+ )
+ # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
+ if check_if_terminated_needed:
+ if config_descriptor.get('terminate-config-primitive'):
+ self.update_db_2("nsrs", nsr_id, {db_update_entry + "needed_terminate": True})
+ check_if_terminated_needed = False
+
+ # TODO register in database that primitive is done
+
+ # STEP 7 Configure metrics
+ if vca_type == "helm":
+ prometheus_jobs = await self.add_prometheus_metrics(
+ ee_id=ee_id,
+ artifact_path=artifact_path,
+ ee_config_descriptor=ee_config_descriptor,
+ vnfr_id=vnfr_id,
+ nsr_id=nsr_id,
+ target_ip=rw_mgmt_ip,
+ )
+ if prometheus_jobs:
+ self.update_db_2("nsrs", nsr_id, {db_update_entry + "prometheus_jobs": prometheus_jobs})
+
+ step = "instantiated at VCA"
+ self.logger.debug(logging_text + step)
+
+ self._write_configuration_status(
+ nsr_id=nsr_id,
+ vca_index=vca_index,
+ status='READY'
+ )
+
+ except Exception as e: # TODO not use Exception but N2VC exception
+ # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
+ if not isinstance(e, (DbException, N2VCException, LcmException, asyncio.CancelledError)):
+ self.logger.error("Exception while {} : {}".format(step, e), exc_info=True)
+ self._write_configuration_status(
+ nsr_id=nsr_id,
+ vca_index=vca_index,
+ status='BROKEN'
+ )
+ raise LcmException("{} {}".format(step, e)) from e
+
+ def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str,
+ error_description: str = None, error_detail: str = None, other_update: dict = None):
+ """
+ Update db_nsr fields.
+ :param nsr_id:
+ :param ns_state:
+ :param current_operation:
+ :param current_operation_id:
+ :param error_description:
+ :param error_detail:
+ :param other_update: Other required changes at database if provided, will be cleared
+ :return:
+ """
+ try:
+ db_dict = other_update or {}
+ db_dict["_admin.nslcmop"] = current_operation_id # for backward compatibility
+ db_dict["_admin.current-operation"] = current_operation_id
+ db_dict["_admin.operation-type"] = current_operation if current_operation != "IDLE" else None
+ db_dict["currentOperation"] = current_operation
+ db_dict["currentOperationID"] = current_operation_id
+ db_dict["errorDescription"] = error_description
+ db_dict["errorDetail"] = error_detail
+
+ if ns_state:
+ db_dict["nsState"] = ns_state
+ self.update_db_2("nsrs", nsr_id, db_dict)
+ except DbException as e:
+ self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e))
+
+ def _write_op_status(self, op_id: str, stage: list = None, error_message: str = None, queuePosition: int = 0,
+ operation_state: str = None, other_update: dict = None):
+ try:
+ db_dict = other_update or {}
+ db_dict['queuePosition'] = queuePosition
+ if isinstance(stage, list):
+ db_dict['stage'] = stage[0]
+ db_dict['detailed-status'] = " ".join(stage)
+ elif stage is not None:
+ db_dict['stage'] = str(stage)
+
+ if error_message is not None:
+ db_dict['errorMessage'] = error_message
+ if operation_state is not None:
+ db_dict['operationState'] = operation_state
+ db_dict["statusEnteredTime"] = time()
+ self.update_db_2("nslcmops", op_id, db_dict)
+ except DbException as e:
+ self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e))
+
+ def _write_all_config_status(self, db_nsr: dict, status: str):
+ try:
+ nsr_id = db_nsr["_id"]
+ # configurationStatus
+ config_status = db_nsr.get('configurationStatus')
+ if config_status:
+ db_nsr_update = {"configurationStatus.{}.status".format(index): status for index, v in
+ enumerate(config_status) if v}
+ # update status
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+
+ except DbException as e:
+ self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e))
+
+ def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str = None,
+ element_under_configuration: str = None, element_type: str = None,
+ other_update: dict = None):
+
+ # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
+ # .format(vca_index, status))
+
+ try:
+ db_path = 'configurationStatus.{}.'.format(vca_index)
+ db_dict = other_update or {}
+ if status:
+ db_dict[db_path + 'status'] = status
+ if element_under_configuration:
+ db_dict[db_path + 'elementUnderConfiguration'] = element_under_configuration
+ if element_type:
+ db_dict[db_path + 'elementType'] = element_type
+ self.update_db_2("nsrs", nsr_id, db_dict)
+ except DbException as e:
+ self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
+ .format(status, nsr_id, vca_index, e))
+
async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
    """
    Check and compute the placement (vim account where to deploy). When it is delegated to an
    external engine (PLA) the request is sent via kafka and the result is polled from the
    database (nslcmops _admin.pla), so it also works when a different LCM worker handles it (HA).
    :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
    :param db_nslcmop: database content of nslcmop
    :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index
    :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr
             with the computed 'vim-account-id'
    """
    modified = False
    nslcmop_id = db_nslcmop['_id']
    placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine'))
    if placement_engine != "PLA":
        # no external placement engine requested: nothing to compute
        return modified

    self.logger.debug(logging_text + "Invoke and wait for placement optimization")
    await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop)

    # poll the database until the placement result shows up or the time budget is exhausted
    db_poll_interval = 5
    remaining = db_poll_interval * 10
    pla_result = None
    while not pla_result and remaining >= 0:
        await asyncio.sleep(db_poll_interval)
        remaining -= db_poll_interval
        db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        pla_result = deep_get(db_nslcmop, ('_admin', 'pla'))

    if not pla_result:
        raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))

    for pla_vnf in pla_result['vnf']:
        vnfr = db_vnfrs.get(pla_vnf['member-vnf-index'])
        vim_account = pla_vnf.get('vimAccountId')
        if not vim_account or not vnfr:
            # the engine decided nothing for this vnf, or the vnf is unknown to us
            continue
        modified = True
        self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": vim_account})
        # keep the in-memory record (caller's db_vnfrs) consistent with the database
        vnfr["vim-account-id"] = vim_account
    return modified
+
def update_nsrs_with_pla_result(self, params):
    """
    Store the placement-engine result received via kafka at the nslcmops record.
    :param params: kafka message content; expected to carry a 'placement' dict with 'nslcmopId'
    :return: None. Errors are logged, never raised.
    """
    # initialize before the try: if deep_get itself raises, the except handler below
    # logs nslcmop_id and would otherwise fail with UnboundLocalError, hiding the real error
    nslcmop_id = None
    try:
        nslcmop_id = deep_get(params, ('placement', 'nslcmopId'))
        self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')})
    except Exception as e:
        self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e))
+
async def instantiate(self, nsr_id, nslcmop_id):
    """
    Instantiate a Network Service: deploy KDUs, VMs (at RO/VIM) and VCA execution environments
    (N2VC), then let Day-1 configuration run. Progress, errors and final state are written at
    database (nsrs, nslcmops) and the outcome is notified via kafka.

    :param nsr_id: ns instance to deploy
    :param nslcmop_id: operation to run
    :return: None
    """

    # Try to lock HA task here
    task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
    if not task_is_locked_by_me:
        # another LCM worker in a HA deployment owns this operation; nothing to do here
        self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id))
        return

    logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
    self.logger.debug(logging_text + "Enter")

    # get all needed from database

    # database nsrs record
    db_nsr = None

    # database nslcmops record
    db_nslcmop = None

    # update operation on nsrs
    db_nsr_update = {}
    # update operation on nslcmops
    db_nslcmop_update = {}

    nslcmop_operation_state = None
    db_vnfrs = {}  # vnf's info indexed by member-index
    # n2vc_info = {}
    tasks_dict_info = {}  # from task to info text
    exc = None
    error_list = []
    stage = ['Stage 1/5: preparation of the environment.', "Waiting for previous operations to terminate.", ""]
    # ^ stage, step, VIM progress
    try:
        # wait for any previous tasks in process
        await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

        stage[1] = "Sync filesystem from database"
        self.fs.sync()  # TODO, make use of partial sync, only for the needed packages

        # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
        stage[1] = "Reading from database"
        # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
        db_nsr_update["detailed-status"] = "creating"
        db_nsr_update["operational-status"] = "init"
        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state="BUILDING",
            current_operation="INSTANTIATING",
            current_operation_id=nslcmop_id,
            other_update=db_nsr_update
        )
        self._write_op_status(
            op_id=nslcmop_id,
            stage=stage,
            queuePosition=0
        )

        # read from db: operation
        stage[1] = "Getting nslcmop={} from db".format(nslcmop_id)
        db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        ns_params = db_nslcmop.get("operationParams")
        # per-operation timeout parameter wins over the global configuration default
        if ns_params and ns_params.get("timeout_ns_deploy"):
            timeout_ns_deploy = ns_params["timeout_ns_deploy"]
        else:
            timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)

        # read from db: ns
        stage[1] = "Getting nsr={} from db".format(nsr_id)
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        stage[1] = "Getting nsd={} from db".format(db_nsr["nsd-id"])
        nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
        db_nsr["nsd"] = nsd
        # nsr_name = db_nsr["name"]   # TODO short-name??

        # read from db: vnf's of this ns
        stage[1] = "Getting vnfrs from db"
        self.logger.debug(logging_text + stage[1])
        db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

        # read from db: vnfd's for every vnf
        db_vnfds_ref = {}  # every vnfd data indexed by vnf name
        db_vnfds = {}  # every vnfd data indexed by vnf id
        db_vnfds_index = {}  # every vnfd data indexed by vnf member-index

        # for each vnf in ns, read vnfd
        for vnfr in db_vnfrs_list:
            db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr  # vnf's dict indexed by member-index: '1', '2', etc
            vnfd_id = vnfr["vnfd-id"]  # vnfd uuid for this vnf
            vnfd_ref = vnfr["vnfd-ref"]  # vnfd name for this vnf
            # if we haven't this vnfd, read it from db
            if vnfd_id not in db_vnfds:
                # read from db
                stage[1] = "Getting vnfd={} id='{}' from db".format(vnfd_id, vnfd_ref)
                self.logger.debug(logging_text + stage[1])
                vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                # store vnfd
                db_vnfds_ref[vnfd_ref] = vnfd  # vnfd's indexed by name
                db_vnfds[vnfd_id] = vnfd  # vnfd's indexed by id
            db_vnfds_index[vnfr["member-vnf-index-ref"]] = db_vnfds[vnfd_id]  # vnfd's indexed by member-index

        # Get or generates the _admin.deployed.VCA list
        vca_deployed_list = None
        if db_nsr["_admin"].get("deployed"):
            vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
        if vca_deployed_list is None:
            vca_deployed_list = []
            configuration_status_list = []
            db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
            db_nsr_update["configurationStatus"] = configuration_status_list
            # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
            populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
        elif isinstance(vca_deployed_list, dict):
            # maintain backward compatibility. Change a dict to list at database
            vca_deployed_list = list(vca_deployed_list.values())
            db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
            populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

        if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list):
            populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
            db_nsr_update["_admin.deployed.RO.vnfd"] = []

        # set state to INSTANTIATED. When instantiated NBI will not delete directly
        db_nsr_update["_admin.nsState"] = "INSTANTIATED"
        self.update_db_2("nsrs", nsr_id, db_nsr_update)

        # n2vc_redesign STEP 2 Deploy Network Scenario
        stage[0] = 'Stage 2/5: deployment of KDUs, VMs and execution environments.'
        self._write_op_status(
            op_id=nslcmop_id,
            stage=stage
        )

        stage[1] = "Deploying KDUs,"
        # self.logger.debug(logging_text + "Before deploy_kdus")
        # Call to deploy_kdus in case exists the "vdu:kdu" param
        await self.deploy_kdus(
            logging_text=logging_text,
            nsr_id=nsr_id,
            nslcmop_id=nslcmop_id,
            db_vnfrs=db_vnfrs,
            db_vnfds=db_vnfds,
            task_instantiation_info=tasks_dict_info,
        )

        stage[1] = "Getting VCA public key."
        # n2vc_redesign STEP 1 Get VCA public ssh-key
        # feature 1429. Add n2vc public key to needed VMs
        n2vc_key = self.n2vc.get_public_key()
        n2vc_key_list = [n2vc_key]
        if self.vca_config.get("public_key"):
            n2vc_key_list.append(self.vca_config["public_key"])

        stage[1] = "Deploying NS at VIM."
        # RO deployment runs as a background task; it is awaited later at _wait_for_tasks
        task_ro = asyncio.ensure_future(
            self.instantiate_RO(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nsd=nsd,
                db_nsr=db_nsr,
                db_nslcmop=db_nslcmop,
                db_vnfrs=db_vnfrs,
                db_vnfds_ref=db_vnfds_ref,
                n2vc_key_list=n2vc_key_list,
                stage=stage
            )
        )
        self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
        tasks_dict_info[task_ro] = "Deploying at VIM"

        # n2vc_redesign STEP 3 to 6 Deploy N2VC
        stage[1] = "Deploying Execution Environments."
        self.logger.debug(logging_text + stage[1])

        nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
        # get_iterable() returns a value from a dict or empty tuple if key does not exist
        for c_vnf in get_iterable(nsd, "constituent-vnfd"):
            vnfd_id = c_vnf["vnfd-id-ref"]
            vnfd = db_vnfds_ref[vnfd_id]
            member_vnf_index = str(c_vnf["member-vnf-index"])
            db_vnfr = db_vnfrs[member_vnf_index]
            base_folder = vnfd["_admin"]["storage"]
            vdu_id = None
            vdu_index = 0
            vdu_name = None
            kdu_name = None

        # Get additional parameters
            deploy_params = {}
            if db_vnfr.get("additionalParamsForVnf"):
                deploy_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"].copy())

            # VNF-level charm, if declared at the descriptor
            descriptor_config = vnfd.get("vnf-configuration")
            if descriptor_config:
                self._deploy_n2vc(
                    logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage
                )

            # Deploy charms for each VDU that supports one.
            for vdud in get_iterable(vnfd, 'vdu'):
                vdu_id = vdud["id"]
                descriptor_config = vdud.get('vdu-configuration')
                vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
                # NOTE(review): vdur may be None when the vnfr has no matching vdur entry;
                # the next line would then raise AttributeError -- TODO confirm it cannot happen
                if vdur.get("additionalParams"):
                    deploy_params_vdu = self._format_additional_params(vdur["additionalParams"])
                else:
                    deploy_params_vdu = deploy_params
                if descriptor_config:
                    # look for vdu index in the db_vnfr["vdu"] section
                    # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
                    #     if vdur["vdu-id-ref"] == vdu_id:
                    #         break
                    # else:
                    #     raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
                    #                        "member_vnf_index={}".format(vdu_id, member_vnf_index))
                    # vdu_name = vdur.get("name")
                    vdu_name = None
                    kdu_name = None
                    # deploy one charm per vdu instance ("count" replicas of this vdu)
                    for vdu_index in range(int(vdud.get("count", 1))):
                        # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                        self._deploy_n2vc(
                            logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                member_vnf_index, vdu_id, vdu_index),
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_vdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage
                        )
            # KDU-level charm, if declared at the descriptor
            for kdud in get_iterable(vnfd, 'kdu'):
                kdu_name = kdud["name"]
                descriptor_config = kdud.get('kdu-configuration')
                if descriptor_config:
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    # look for vdu index in the db_vnfr["vdu"] section
                    # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
                    #     if vdur["vdu-id-ref"] == vdu_id:
                    #         break
                    # else:
                    #     raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
                    #                        "member_vnf_index={}".format(vdu_id, member_vnf_index))
                    # vdu_name = vdur.get("name")
                    # vdu_name = None

                    self._deploy_n2vc(
                        logging_text=logging_text,
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage
                    )

        # Check if this NS has a charm configuration
        descriptor_config = nsd.get("ns-configuration")
        if descriptor_config and descriptor_config.get("juju"):
            vnfd_id = None
            db_vnfr = None
            member_vnf_index = None
            vdu_id = None
            kdu_name = None
            vdu_index = 0
            vdu_name = None

            # Get additional parameters
            deploy_params = {}
            if db_nsr.get("additionalParamsForNs"):
                deploy_params = self._format_additional_params(db_nsr["additionalParamsForNs"].copy())
            base_folder = nsd["_admin"]["storage"]
            self._deploy_n2vc(
                logging_text=logging_text,
                db_nsr=db_nsr,
                db_vnfr=db_vnfr,
                nslcmop_id=nslcmop_id,
                nsr_id=nsr_id,
                nsi_id=nsi_id,
                vnfd_id=vnfd_id,
                vdu_id=vdu_id,
                kdu_name=kdu_name,
                member_vnf_index=member_vnf_index,
                vdu_index=vdu_index,
                vdu_name=vdu_name,
                deploy_params=deploy_params,
                descriptor_config=descriptor_config,
                base_folder=base_folder,
                task_instantiation_info=tasks_dict_info,
                stage=stage
            )

        # rest of stuff will be done at finally

    except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
        self.logger.error(logging_text + "Exit Exception while '{}': {}".format(stage[1], e))
        exc = e
    except asyncio.CancelledError:
        self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
        exc = "Operation was cancelled"
    except Exception as e:
        exc = traceback.format_exc()
        self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
    finally:
        if exc:
            error_list.append(str(exc))
        try:
            # wait for pending tasks
            if tasks_dict_info:
                stage[1] = "Waiting for instantiate pending tasks."
                self.logger.debug(logging_text + stage[1])
                error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_deploy,
                                                         stage, nslcmop_id, nsr_id=nsr_id)
            stage[1] = stage[2] = ""
        except asyncio.CancelledError:
            error_list.append("Cancelled")
            # TODO cancel all tasks
        except Exception as exc:
            error_list.append(str(exc))

        # update operation-status
        db_nsr_update["operational-status"] = "running"
        # let's begin with VCA 'configured' status (later we can change it)
        db_nsr_update["config-status"] = "configured"
        for task, task_name in tasks_dict_info.items():
            # the short-circuit order matters: task.exception() is only evaluated for tasks
            # that are done and not cancelled, so it cannot raise here
            if not task.done() or task.cancelled() or task.exception():
                if task_name.startswith(self.task_name_deploy_vca):
                    # A N2VC task is pending
                    db_nsr_update["config-status"] = "failed"
                else:
                    # RO or KDU task is pending
                    db_nsr_update["operational-status"] = "failed"

        # update status at database
        if error_list:
            error_detail = ". ".join(error_list)
            self.logger.error(logging_text + error_detail)
            error_description_nslcmop = 'Stage: {}. Detail: {}'.format(stage[0], error_detail)
            error_description_nsr = 'Operation: INSTANTIATING.{}, Stage {}'.format(nslcmop_id, stage[0])

            db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
            db_nslcmop_update["detailed-status"] = error_detail
            nslcmop_operation_state = "FAILED"
            ns_state = "BROKEN"
        else:
            error_detail = None
            error_description_nsr = error_description_nslcmop = None
            ns_state = "READY"
            db_nsr_update["detailed-status"] = "Done"
            db_nslcmop_update["detailed-status"] = "Done"
            nslcmop_operation_state = "COMPLETED"

        if db_nsr:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=ns_state,
                current_operation="IDLE",
                current_operation_id=None,
                error_description=error_description_nsr,
                error_detail=error_detail,
                other_update=db_nsr_update
            )
        self._write_op_status(
            op_id=nslcmop_id,
            stage="",
            error_message=error_description_nslcmop,
            operation_state=nslcmop_operation_state,
            other_update=db_nslcmop_update,
        )

        if nslcmop_operation_state:
            try:
                # notify the result via kafka so NBI and other modules can react
                await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
                                                               "operationState": nslcmop_operation_state},
                                        loop=self.loop)
            except Exception as e:
                self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
+
async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int,
                             timeout: int = 3600, vca_type: str = None) -> bool:
    """
    Add the juju relations, defined at NS and VNF level, in which the VCA at vca_index
    participates. It polls the database until every peer of each relation has its
    configuration software installed, adding each relation as soon as both peers are ready.

    Fixes over the previous version: the BROKEN-peer check of the VNF relations removed
    entries from ns_relations instead of vnf_relations (so broken VNF relations spun until
    timeout), both loops mutated the list being iterated (skipping elements), and one extra
    5-second sleep was performed even after all relations had been added.

    :param logging_text: prefix for logging, with the ns and nslcmop identifiers
    :param nsr_id: ns instance identifier
    :param vca_index: index of this VCA inside _admin.deployed.VCA
    :param timeout: maximum seconds to wait for the peers, 1 hour by default
    :param vca_type: type of VCA; defaults to "lxc_proxy_charm"
    :return: True when all relations were added (or none are defined); False on timeout or error
    """

    # steps:
    # 1. find all relations for this VCA
    # 2. wait for other peers related
    # 3. add relations

    try:
        vca_type = vca_type or "lxc_proxy_charm"

        # STEP 1: find all relations for this VCA

        # read nsr record
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

        # this VCA data
        my_vca = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))[vca_index]

        # read all ns-configuration relations where this VCA is one of the entities
        ns_relations = list()
        db_ns_relations = deep_get(nsd, ('ns-configuration', 'relation'))
        if db_ns_relations:
            for r in db_ns_relations:
                # check if this VCA is in the relation
                if my_vca.get('member-vnf-index') in \
                        (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
                    ns_relations.append(r)

        # read all vnf-configuration relations where this VCA is one of the entities
        vnf_relations = list()
        db_vnfd_list = db_nsr.get('vnfd-id')
        if db_vnfd_list:
            for vnfd in db_vnfd_list:
                db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
                db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation'))
                if db_vnf_relations:
                    for r in db_vnf_relations:
                        # check if this VCA is in the relation
                        if my_vca.get('vdu_id') in (r.get('entities')[0].get('id'),
                                                    r.get('entities')[1].get('id')):
                            vnf_relations.append(r)

        # if no relations, terminate
        if not ns_relations and not vnf_relations:
            self.logger.debug(logging_text + ' No relations')
            return True

        self.logger.debug(logging_text + ' adding relations\n {}\n {}'.format(ns_relations, vnf_relations))

        async def _settle_relations(relations, vca_key):
            """Add every relation in 'relations' whose two peers have their configuration
            software installed. Added relations, and relations with a BROKEN peer (which
            can never be satisfied), are removed from 'relations' in place. 'vca_key' is
            the VCA field matched against the relation entity ids ('member-vnf-index' for
            NS relations, 'vdu_id' for VNF relations)."""
            vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
            vca_status_list = db_nsr.get('configurationStatus') or []
            # iterate over a copy: entries are removed from 'relations' while walking it
            for r in list(relations):
                entities = r.get('entities')
                ids = (entities[0].get('id'), entities[1].get('id'))
                endpoints = (entities[0].get('endpoint'), entities[1].get('endpoint'))
                ee_ids = [None, None]
                peer_broken = False
                for i, vca in enumerate(vca_list):
                    # configurationStatus may be shorter than the VCA list; guard the index
                    status = vca_status_list[i] if i < len(vca_status_list) else None
                    for side in (0, 1):
                        if vca.get(vca_key) != ids[side]:
                            continue
                        if vca.get('config_sw_installed'):
                            ee_ids[side] = vca.get('ee_id')
                        if (status or {}).get('status') == 'BROKEN':
                            peer_broken = True
                if ee_ids[0] and ee_ids[1]:
                    # both peers are ready: add the relation and forget it
                    await self.vca_map[vca_type].add_relation(
                        ee_id_1=ee_ids[0],
                        ee_id_2=ee_ids[1],
                        endpoint_1=endpoints[0],
                        endpoint_2=endpoints[1])
                    relations.remove(r)
                elif peer_broken:
                    # a peer is broken: this relation will never be possible
                    relations.remove(r)

        # STEP 2-3: wait for the peers and add the relations, until done or timeout
        start = time()
        while ns_relations or vnf_relations:
            # check timeout
            if time() - start >= timeout:
                self.logger.error(logging_text + ' : timeout adding relations')
                return False

            # reload nsr from database (we need to update record: _admin.deployed.VCA)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            await _settle_relations(ns_relations, 'member-vnf-index')
            await _settle_relations(vnf_relations, 'vdu_id')

            if not ns_relations and not vnf_relations:
                self.logger.debug('Relations added')
                break

            # wait for next try
            await asyncio.sleep(5.0)

        return True

    except Exception as e:
        self.logger.warn(logging_text + ' ERROR adding relations: {}'.format(e))
        return False
+
+ def _write_db_callback(self, task, item, _id, on_done=None, on_exc=None):
+ """
+ callback for kdu install intended to store the returned kdu_instance at database
+ :return: None
+ """
+ db_update = {}
+ try:
+ result = task.result()
+ if on_done:
+ db_update[on_done] = str(result)
+ except Exception as e:
+ if on_exc:
+ db_update[on_exc] = str(e)
+ if db_update:
+ try:
+ self.update_db_2(item, _id, db_update)
+ except Exception:
+ pass
+
async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info):
    """
    Launch the deployment of every KDU (helm-chart or juju-bundle) declared in the vnf records.

    One install task is created and registered per kdur; nothing is awaited here. The created
    tasks are annotated into task_instantiation_info so the caller can track them, and one entry
    per KDU is written at "_admin.deployed.K8s" of the nsr database document.

    :param logging_text: prefix for log messages
    :param nsr_id: nsr _id, used for database updates and task registration
    :param nslcmop_id: operation id, used to register the created tasks
    :param db_vnfrs: dict of vnfr documents (values are iterated; each may contain "kdur" items)
    :param db_vnfds: dict of vnfd documents indexed by vnfd _id
    :param task_instantiation_info: dict where created tasks are annotated (task -> description)
    :raises LcmException: if a kdu has an unknown type, its k8s cluster is unusable, or on error
    """
    # Launch kdus if present in the descriptor

    # cache of resolved k8s-cluster internal ids, per cluster type, to avoid repeated DB lookups
    k8scluster_id_2_uuic = {"helm-chart": {}, "juju-bundle": {}}

    def _get_cluster_id(cluster_id, cluster_type):
        # resolve (and cache) the internal id that this k8s cluster has for the given type
        nonlocal k8scluster_id_2_uuic
        if cluster_id in k8scluster_id_2_uuic[cluster_type]:
            return k8scluster_id_2_uuic[cluster_type][cluster_id]

        db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
        if not db_k8scluster:
            raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
        k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
        if not k8s_id:
            raise LcmException("K8s cluster '{}' has not been initilized for '{}'".format(cluster_id, cluster_type))
        k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
        return k8s_id

    logging_text += "Deploy kdus: "
    step = ""
    try:
        db_nsr_update = {"_admin.deployed.K8s": []}
        self.update_db_2("nsrs", nsr_id, db_nsr_update)

        index = 0
        updated_cluster_list = []  # clusters whose helm repos were already synchronized

        for vnfr_data in db_vnfrs.values():
            for kdur in get_iterable(vnfr_data, "kdur"):
                desc_params = self._format_additional_params(kdur.get("additionalParams"))
                vnfd_id = vnfr_data.get('vnfd-id')
                namespace = kdur.get("k8s-namespace")
                # determine the kdu model and the cluster type it needs
                if kdur.get("helm-chart"):
                    kdumodel = kdur["helm-chart"]
                    k8sclustertype = "helm-chart"
                elif kdur.get("juju-bundle"):
                    kdumodel = kdur["juju-bundle"]
                    k8sclustertype = "juju-bundle"
                else:
                    raise LcmException("kdu type for kdu='{}.{}' is neither helm-chart nor "
                                       "juju-bundle. Maybe an old NBI version is running".
                                       format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]))
                # check if kdumodel is a file and exists; if so, use the full filesystem path
                try:
                    storage = deep_get(db_vnfds.get(vnfd_id), ('_admin', 'storage'))
                    if storage and storage.get('pkg-dir'):  # may be not present if vnfd has not artifacts
                        # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                        filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype,
                                                         kdumodel)
                        if self.fs.file_exists(filename, mode='file') or self.fs.file_exists(filename, mode='dir'):
                            kdumodel = self.fs.path + filename
                except (asyncio.TimeoutError, asyncio.CancelledError):
                    raise
                except Exception:  # it is not a file: keep kdumodel as the raw reference
                    pass

                k8s_cluster_id = kdur["k8s-cluster"]["id"]
                step = "Synchronize repos for k8s cluster '{}'".format(k8s_cluster_id)
                cluster_uuid = _get_cluster_id(k8s_cluster_id, k8sclustertype)

                # helm-chart clusters: synchronize the helm repos once per cluster
                if k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list:
                    del_repo_list, added_repo_dict = await asyncio.ensure_future(
                        self.k8sclusterhelm.synchronize_repos(cluster_uuid=cluster_uuid))
                    if del_repo_list or added_repo_dict:
                        # mirror the repo changes at the k8scluster database document
                        unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
                        updated = {'_admin.helm_charts_added.' +
                                   item: name for item, name in added_repo_dict.items()}
                        self.logger.debug(logging_text + "repos synchronized on k8s cluster '{}' to_delete: {}, "
                                          "to_add: {}".format(k8s_cluster_id, del_repo_list,
                                                              added_repo_dict))
                        self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset)
                    updated_cluster_list.append(cluster_uuid)

                step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"],
                                                                          kdur["kdu-name"], k8s_cluster_id)

                # initial database entry for this KDU; "kdu-instance" is filled later by the
                # done-callback once the install task returns
                k8s_instace_info = {"kdu-instance": None,
                                    "k8scluster-uuid": cluster_uuid,
                                    "k8scluster-type": k8sclustertype,
                                    "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                                    "kdu-name": kdur["kdu-name"],
                                    "kdu-model": kdumodel,
                                    "namespace": namespace}
                db_path = "_admin.deployed.K8s.{}".format(index)
                db_nsr_update[db_path] = k8s_instace_info
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

                db_dict = {"collection": "nsrs",
                           "filter": {"_id": nsr_id},
                           "path": db_path}

                # launch the install; the task is tracked, not awaited here
                task = asyncio.ensure_future(
                    self.k8scluster_map[k8sclustertype].install(cluster_uuid=cluster_uuid, kdu_model=kdumodel,
                                                                atomic=True, params=desc_params,
                                                                db_dict=db_dict, timeout=600,
                                                                kdu_name=kdur["kdu-name"], namespace=namespace))

                # when done, store the returned kdu-instance (or the error text) at the database
                task.add_done_callback(partial(self._write_db_callback, item="nsrs", _id=nsr_id,
                                               on_done=db_path + ".kdu-instance",
                                               on_exc=db_path + ".detailed-status"))
                self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
                task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])

                index += 1

    except (LcmException, asyncio.CancelledError):
        raise
    except Exception as e:
        msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
        if isinstance(e, (N2VCException, DbException)):
            self.logger.error(logging_text + msg)
        else:
            self.logger.critical(logging_text + msg, exc_info=True)
        raise LcmException(msg)
    finally:
        # flush the (possibly partial) K8s deployment info even on failure
        if db_nsr_update:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
+
def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id,
                 kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config,
                 base_folder, task_instantiation_info, stage):
    """
    Launch one instantiate_N2VC asyncio task per execution environment of a config descriptor.

    For each execution environment it looks for an existing entry at
    <nsrs>._admin.deployed.VCA; if not found, a new entry is created and written to the
    database. Created tasks are registered at lcm_tasks and annotated into
    task_instantiation_info; nothing is awaited here.
    """
    # launch instantiate_N2VC in a asyncio task and register task object
    # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
    # if not found, create one entry and update database
    # fill db_nsr._admin.deployed.VCA.<index>

    self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id))
    # obtain the list of execution environments from the descriptor
    if descriptor_config.get("juju"):  # There is one execution environment of type juju
        ee_list = [descriptor_config]
    elif descriptor_config.get("execution-environment-list"):
        ee_list = descriptor_config.get("execution-environment-list")
    else:  # other types as script are not supported
        ee_list = []

    for ee_item in ee_list:
        self.logger.debug(logging_text + "_deploy_n2vc ee_item juju={}, helm={}".format(ee_item.get('juju'),
                                                                                        ee_item.get("helm-chart")))
        ee_descriptor_id = ee_item.get("id")
        # determine vca_type/vca_name from the execution environment content
        if ee_item.get("juju"):
            vca_name = ee_item['juju'].get('charm')
            vca_type = "lxc_proxy_charm" if ee_item['juju'].get('charm') is not None else "native_charm"
            if ee_item['juju'].get('cloud') == "k8s":
                vca_type = "k8s_proxy_charm"
            elif ee_item['juju'].get('proxy') is False:
                vca_type = "native_charm"
        elif ee_item.get("helm-chart"):
            vca_name = ee_item['helm-chart']
            vca_type = "helm"
        else:
            self.logger.debug(logging_text + "skipping non juju neither charm configuration")
            continue

        # look for an already-deployed VCA entry matching this execution environment
        vca_index = -1
        for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
            if not vca_deployed:
                continue
            if vca_deployed.get("member-vnf-index") == member_vnf_index and \
                    vca_deployed.get("vdu_id") == vdu_id and \
                    vca_deployed.get("kdu_name") == kdu_name and \
                    vca_deployed.get("vdu_count_index", 0) == vdu_index and \
                    vca_deployed.get("ee_descriptor_id") == ee_descriptor_id:
                break
        else:
            # for/else: runs only when no existing entry matched above.
            # not found, create one.
            target = "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
            if vdu_id:
                target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
            elif kdu_name:
                target += "/kdu/{}".format(kdu_name)
            vca_deployed = {
                "target_element": target,
                # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                "member-vnf-index": member_vnf_index,
                "vdu_id": vdu_id,
                "kdu_name": kdu_name,
                "vdu_count_index": vdu_index,
                "operational-status": "init",  # TODO revise
                "detailed-status": "",  # TODO revise
                "step": "initial-deploy",  # TODO revise
                "vnfd_id": vnfd_id,
                "vdu_name": vdu_name,
                "type": vca_type,
                "ee_descriptor_id": ee_descriptor_id
            }
            # new entry goes one past the last enumerated index
            vca_index += 1

            # create VCA and configurationStatus in db
            db_dict = {
                "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                "configurationStatus.{}".format(vca_index): dict()
            }
            self.update_db_2("nsrs", nsr_id, db_dict)

            # keep the in-memory copy in sync with what was just written to the database
            db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

        # Launch task (not awaited here; registered and annotated for the caller to track)
        task_n2vc = asyncio.ensure_future(
            self.instantiate_N2VC(
                logging_text=logging_text,
                vca_index=vca_index,
                nsi_id=nsi_id,
                db_nsr=db_nsr,
                db_vnfr=db_vnfr,
                vdu_id=vdu_id,
                kdu_name=kdu_name,
                vdu_index=vdu_index,
                deploy_params=deploy_params,
                config_descriptor=descriptor_config,
                base_folder=base_folder,
                nslcmop_id=nslcmop_id,
                stage=stage,
                vca_type=vca_type,
                vca_name=vca_name,
                ee_config_descriptor=ee_item
            )
        )
        self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc)
        task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format(
            member_vnf_index or "", vdu_id or "")
+
+ @staticmethod
+ def _get_terminate_config_primitive(primitive_list, vca_deployed):
+ """ Get a sorted terminate config primitive list. In case ee_descriptor_id is present at vca_deployed,
+ it get only those primitives for this execution envirom"""
+
+ primitive_list = primitive_list or []
+ # filter primitives by ee_descriptor_id
+ ee_descriptor_id = vca_deployed.get("ee_descriptor_id")
+ primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
+
+ if primitive_list:
+ primitive_list.sort(key=lambda val: int(val['seq']))
+
+ return primitive_list
+
+ @staticmethod