+ ee_id, credentials = await self.vca_map[
+ vca_type
+ ].create_execution_environment(
+ namespace=namespace,
+ reuse_ee_id=ee_id,
+ db_dict=db_dict,
+ vca_id=vca_id,
+ )
+
+ elif vca_type == "native_charm":
+ step = "Waiting to VM being up and getting IP address"
+ self.logger.debug(logging_text + step)
+ rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+ logging_text,
+ nsr_id,
+ vnfr_id,
+ vdu_id,
+ vdu_index,
+ user=None,
+ pub_key=None,
+ )
+ credentials = {"hostname": rw_mgmt_ip}
+ # get username
+ username = deep_get(
+ config_descriptor, ("config-access", "ssh-access", "default-user")
+ )
+ # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
+ # merged. Meanwhile let's get username from initial-config-primitive
+ if not username and initial_config_primitive_list:
+ for config_primitive in initial_config_primitive_list:
+ for param in config_primitive.get("parameter", ()):
+ if param["name"] == "ssh-username":
+ username = param["value"]
+ break
+ if not username:
+ raise LcmException(
+ "Cannot determine the username neither with 'initial-config-primitive' nor with "
+ "'config-access.ssh-access.default-user'"
+ )
+ credentials["username"] = username
+ # n2vc_redesign STEP 3.2
+
+ self._write_configuration_status(
+ nsr_id=nsr_id,
+ vca_index=vca_index,
+ status="REGISTERING",
+ element_under_configuration=element_under_configuration,
+ element_type=element_type,
+ )
+
+ step = "register execution environment {}".format(credentials)
+ self.logger.debug(logging_text + step)
+ ee_id = await self.vca_map[vca_type].register_execution_environment(
+ credentials=credentials,
+ namespace=namespace,
+ db_dict=db_dict,
+ vca_id=vca_id,
+ )
+
+ # for compatibility with MON/POL modules, the need model and application name at database
+ # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
+ ee_id_parts = ee_id.split(".")
+ db_nsr_update = {db_update_entry + "ee_id": ee_id}
+ if len(ee_id_parts) >= 2:
+ model_name = ee_id_parts[0]
+ application_name = ee_id_parts[1]
+ db_nsr_update[db_update_entry + "model"] = model_name
+ db_nsr_update[db_update_entry + "application"] = application_name
+
+ # n2vc_redesign STEP 3.3
+ step = "Install configuration Software"
+
+ self._write_configuration_status(
+ nsr_id=nsr_id,
+ vca_index=vca_index,
+ status="INSTALLING SW",
+ element_under_configuration=element_under_configuration,
+ element_type=element_type,
+ other_update=db_nsr_update,
+ )
+
+ # TODO check if already done
+ self.logger.debug(logging_text + step)
+ config = None
+ if vca_type == "native_charm":
+ config_primitive = next(
+ (p for p in initial_config_primitive_list if p["name"] == "config"),
+ None,
+ )
+ if config_primitive:
+ config = self._map_primitive_params(
+ config_primitive, {}, deploy_params
+ )
+ num_units = 1
+ if vca_type == "lxc_proxy_charm":
+ if element_type == "NS":
+ num_units = db_nsr.get("config-units") or 1
+ elif element_type == "VNF":
+ num_units = db_vnfr.get("config-units") or 1
+ elif element_type == "VDU":
+ for v in db_vnfr["vdur"]:
+ if vdu_id == v["vdu-id-ref"]:
+ num_units = v.get("config-units") or 1
+ break
+ if vca_type != "k8s_proxy_charm":
+ await self.vca_map[vca_type].install_configuration_sw(
+ ee_id=ee_id,
+ artifact_path=artifact_path,
+ db_dict=db_dict,
+ config=config,
+ num_units=num_units,
+ vca_id=vca_id,
+ vca_type=vca_type,
+ )
+
+ # write in db flag of configuration_sw already installed
+ self.update_db_2(
+ "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
+ )
+
+ # add relations for this VCA (wait for other peers related with this VCA)
+ await self._add_vca_relations(
+ logging_text=logging_text,
+ nsr_id=nsr_id,
+ vca_type=vca_type,
+ vca_index=vca_index,
+ )
+
+ # if SSH access is required, then get execution environment SSH public
+ # if native charm we have waited already to VM be UP
+ if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
+ pub_key = None
+ user = None
+ # self.logger.debug("get ssh key block")
+ if deep_get(
+ config_descriptor, ("config-access", "ssh-access", "required")
+ ):
+ # self.logger.debug("ssh key needed")
+ # Needed to inject a ssh key
+ user = deep_get(
+ config_descriptor,
+ ("config-access", "ssh-access", "default-user"),
+ )
+ step = "Install configuration Software, getting public ssh key"
+ pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
+ ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
+ )
+
+ step = "Insert public key into VM user={} ssh_key={}".format(
+ user, pub_key
+ )
+ else:
+ # self.logger.debug("no need to get ssh key")
+ step = "Waiting to VM being up and getting IP address"
+ self.logger.debug(logging_text + step)
+
+ # default rw_mgmt_ip to None, avoiding the non definition of the variable
+ rw_mgmt_ip = None
+
+ # n2vc_redesign STEP 5.1
+ # wait for RO (ip-address) Insert pub_key into VM
+ if vnfr_id:
+ if kdu_name:
+ rw_mgmt_ip, services = await self.wait_kdu_up(
+ logging_text, nsr_id, vnfr_id, kdu_name
+ )
+ vnfd = self.db.get_one(
+ "vnfds_revisions",
+ {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
+ )
+ kdu = get_kdu(vnfd, kdu_name)
+ kdu_services = [
+ service["name"] for service in get_kdu_services(kdu)
+ ]
+ exposed_services = []
+ for service in services:
+ if any(s in service["name"] for s in kdu_services):
+ exposed_services.append(service)
+ await self.vca_map[vca_type].exec_primitive(
+ ee_id=ee_id,
+ primitive_name="config",
+ params_dict={
+ "osm-config": json.dumps(
+ OsmConfigBuilder(
+ k8s={"services": exposed_services}
+ ).build()
+ )
+ },
+ vca_id=vca_id,
+ )
+
+ # This verification is needed in order to avoid trying to add a public key
+ # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
+ # for a KNF and not for its KDUs, the previous verification gives False, and the code
+ # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
+ # or it is a KNF)
+ elif db_vnfr.get('vdur'):
+ rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+ logging_text,
+ nsr_id,
+ vnfr_id,
+ vdu_id,
+ vdu_index,
+ user=user,
+ pub_key=pub_key,
+ )
+
+ self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
+
+ # store rw_mgmt_ip in deploy params for later replacement
+ deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
+
+ # n2vc_redesign STEP 6 Execute initial config primitive
+ step = "execute initial config primitive"
+
+ # wait for dependent primitives execution (NS -> VNF -> VDU)
+ if initial_config_primitive_list:
+ await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
+
+ # stage, in function of element type: vdu, kdu, vnf or ns
+ my_vca = vca_deployed_list[vca_index]
+ if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
+ # VDU or KDU
+ stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
+ elif my_vca.get("member-vnf-index"):
+ # VNF
+ stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
+ else:
+ # NS
+ stage[0] = "Stage 5/5: running Day-1 primitives for NS."
+
+ self._write_configuration_status(
+ nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
+ )
+
+ self._write_op_status(op_id=nslcmop_id, stage=stage)
+
+ check_if_terminated_needed = True
+ for initial_config_primitive in initial_config_primitive_list:
+ # adding information on the vca_deployed if it is a NS execution environment
+ if not vca_deployed["member-vnf-index"]:
+ deploy_params["ns_config_info"] = json.dumps(
+ self._get_ns_config_info(nsr_id)
+ )
+ # TODO check if already done
+ primitive_params_ = self._map_primitive_params(
+ initial_config_primitive, {}, deploy_params
+ )
+
+ step = "execute primitive '{}' params '{}'".format(
+ initial_config_primitive["name"], primitive_params_
+ )
+ self.logger.debug(logging_text + step)
+ await self.vca_map[vca_type].exec_primitive(
+ ee_id=ee_id,
+ primitive_name=initial_config_primitive["name"],
+ params_dict=primitive_params_,
+ db_dict=db_dict,
+ vca_id=vca_id,
+ vca_type=vca_type,
+ )
+ # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
+ if check_if_terminated_needed:
+ if config_descriptor.get("terminate-config-primitive"):
+ self.update_db_2(
+ "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
+ )
+ check_if_terminated_needed = False
+
+ # TODO register in database that primitive is done
+
+ # STEP 7 Configure metrics
+ if vca_type == "helm" or vca_type == "helm-v3":
+ prometheus_jobs = await self.extract_prometheus_scrape_jobs(
+ ee_id=ee_id,
+ artifact_path=artifact_path,
+ ee_config_descriptor=ee_config_descriptor,
+ vnfr_id=vnfr_id,
+ nsr_id=nsr_id,
+ target_ip=rw_mgmt_ip,
+ )
+ if prometheus_jobs:
+ self.update_db_2(
+ "nsrs",
+ nsr_id,
+ {db_update_entry + "prometheus_jobs": prometheus_jobs},
+ )
+
+ for job in prometheus_jobs:
+ self.db.set_one(
+ "prometheus_jobs",
+ {"job_name": job["job_name"]},
+ job,
+ upsert=True,
+ fail_on_empty=False,
+ )
+
+ step = "instantiated at VCA"
+ self.logger.debug(logging_text + step)
+
+ self._write_configuration_status(
+ nsr_id=nsr_id, vca_index=vca_index, status="READY"
+ )
+
+ except Exception as e: # TODO not use Exception but N2VC exception
+ # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
+ if not isinstance(
+ e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
+ ):
+ self.logger.error(
+ "Exception while {} : {}".format(step, e), exc_info=True
+ )
+ self._write_configuration_status(
+ nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
+ )
+ raise LcmException("{} {}".format(step, e)) from e
+
+ def _write_ns_status(
+ self,
+ nsr_id: str,
+ ns_state: str,
+ current_operation: str,
+ current_operation_id: str,
+ error_description: str = None,
+ error_detail: str = None,
+ other_update: dict = None,
+ ):
+ """
+ Update db_nsr fields.
+ :param nsr_id:
+ :param ns_state:
+ :param current_operation:
+ :param current_operation_id:
+ :param error_description:
+ :param error_detail:
+ :param other_update: Other required changes at database if provided, will be cleared
+ :return:
+ """
+ try:
+ db_dict = other_update or {}
+ db_dict[
+ "_admin.nslcmop"
+ ] = current_operation_id # for backward compatibility
+ db_dict["_admin.current-operation"] = current_operation_id
+ db_dict["_admin.operation-type"] = (
+ current_operation if current_operation != "IDLE" else None
+ )
+ db_dict["currentOperation"] = current_operation
+ db_dict["currentOperationID"] = current_operation_id
+ db_dict["errorDescription"] = error_description
+ db_dict["errorDetail"] = error_detail
+
+ if ns_state:
+ db_dict["nsState"] = ns_state
+ self.update_db_2("nsrs", nsr_id, db_dict)
+ except DbException as e:
+ self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
+
+ def _write_op_status(
+ self,
+ op_id: str,
+ stage: list = None,
+ error_message: str = None,
+ queuePosition: int = 0,
+ operation_state: str = None,
+ other_update: dict = None,
+ ):
+ try:
+ db_dict = other_update or {}
+ db_dict["queuePosition"] = queuePosition
+ if isinstance(stage, list):
+ db_dict["stage"] = stage[0]
+ db_dict["detailed-status"] = " ".join(stage)
+ elif stage is not None:
+ db_dict["stage"] = str(stage)
+
+ if error_message is not None:
+ db_dict["errorMessage"] = error_message
+ if operation_state is not None:
+ db_dict["operationState"] = operation_state
+ db_dict["statusEnteredTime"] = time()
+ self.update_db_2("nslcmops", op_id, db_dict)
+ except DbException as e:
+ self.logger.warn(
+ "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
+ )
+
+ def _write_all_config_status(self, db_nsr: dict, status: str):
+ try:
+ nsr_id = db_nsr["_id"]
+ # configurationStatus
+ config_status = db_nsr.get("configurationStatus")
+ if config_status:
+ db_nsr_update = {
+ "configurationStatus.{}.status".format(index): status
+ for index, v in enumerate(config_status)
+ if v
+ }
+ # update status
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+
+ except DbException as e:
+ self.logger.warn(
+ "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
+ )
+
+ def _write_configuration_status(
+ self,
+ nsr_id: str,
+ vca_index: int,
+ status: str = None,
+ element_under_configuration: str = None,
+ element_type: str = None,
+ other_update: dict = None,
+ ):
+
+ # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
+ # .format(vca_index, status))
+
+ try:
+ db_path = "configurationStatus.{}.".format(vca_index)
+ db_dict = other_update or {}
+ if status:
+ db_dict[db_path + "status"] = status
+ if element_under_configuration:
+ db_dict[
+ db_path + "elementUnderConfiguration"
+ ] = element_under_configuration
+ if element_type:
+ db_dict[db_path + "elementType"] = element_type
+ self.update_db_2("nsrs", nsr_id, db_dict)
+ except DbException as e:
+ self.logger.warn(
+ "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
+ status, nsr_id, vca_index, e
+ )
+ )
+
+ async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
+ """
+ Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
+ sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
+ Database is used because the result can be obtained from a different LCM worker in case of HA.
+ :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
+ :param db_nslcmop: database content of nslcmop
+ :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
+ :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
+ computed 'vim-account-id'
+ """
+ modified = False
+ nslcmop_id = db_nslcmop["_id"]
+ placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
+ if placement_engine == "PLA":
+ self.logger.debug(
+ logging_text + "Invoke and wait for placement optimization"
+ )
+ await self.msg.aiowrite(
+ "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
+ )
+ db_poll_interval = 5
+ wait = db_poll_interval * 10
+ pla_result = None
+ while not pla_result and wait >= 0:
+ await asyncio.sleep(db_poll_interval)
+ wait -= db_poll_interval
+ db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+ pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
+
+ if not pla_result:
+ raise LcmException(
+ "Placement timeout for nslcmopId={}".format(nslcmop_id)
+ )
+
+ for pla_vnf in pla_result["vnf"]:
+ vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
+ if not pla_vnf.get("vimAccountId") or not vnfr:
+ continue
+ modified = True
+ self.db.set_one(
+ "vnfrs",
+ {"_id": vnfr["_id"]},
+ {"vim-account-id": pla_vnf["vimAccountId"]},
+ )
+ # Modifies db_vnfrs
+ vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
+ return modified
+
+ def update_nsrs_with_pla_result(self, params):
+ try:
+ nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
+ self.update_db_2(
+ "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
+ )
+ except Exception as e:
+ self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
+
    async def instantiate(self, nsr_id: str, nslcmop_id: str):
        """
        Deploy a NS instance. Reads the operation and NS records from the
        database, deploys KDUs and the network scenario (RO/VIM), and launches
        one execution environment deployment per configurable NS, VNF, VDU and
        KDU found in the descriptors. Progress, errors and the final state are
        written back to the "nsrs" and "nslcmops" database records; a kafka
        notification is sent at the end.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Errors are stored at database, not raised.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker of the HA deployment owns this operation
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                # additionalParamsForVnf is stored as a JSON string; decode it in place
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            if ns_params and ns_params.get("timeout_ns_deploy"):
                # per-operation timeout takes precedence over the configured one
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"] # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            # kdur additionalParams is stored as a JSON string
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds vnfd dicts, so testing the string
                # vnfd_id against it is always True; the vnfd is re-read (and
                # appended again) once per vnfr sharing the same vnfd — confirm
                # whether this duplication is intended.
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            stage[1] = "Deploying NS at VIM."
            # RO deployment runs concurrently with the N2VC deployments below;
            # completion is awaited via tasks_dict_info in the finally block
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm, if the vnfd declares one
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        # NOTE(review): aliases deploy_params, so the "OSM" key
                        # assignment below also mutates the VNF-level dict
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # Deploy charms for each KDU that supports one.
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                    stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            # NOTE(review): rebinds the enclosing "exc" name; harmless here
            # since it is only appended to error_list
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            # notify the final operation state through kafka (best effort)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
+
+ def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
+ if vnfd_id not in cached_vnfds:
+ cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
+ return cached_vnfds[vnfd_id]
+
+ def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
+ if vnf_profile_id not in cached_vnfrs:
+ cached_vnfrs[vnf_profile_id] = self.db.get_one(
+ "vnfrs",
+ {
+ "member-vnf-index-ref": vnf_profile_id,
+ "nsr-id-ref": nsr_id,
+ },
+ )
+ return cached_vnfrs[vnf_profile_id]
+
+ def _is_deployed_vca_in_relation(
+ self, vca: DeployedVCA, relation: Relation
+ ) -> bool:
+ found = False
+ for endpoint in (relation.provider, relation.requirer):
+ if endpoint["kdu-resource-profile-id"]:
+ continue
+ found = (
+ vca.vnf_profile_id == endpoint.vnf_profile_id
+ and vca.vdu_profile_id == endpoint.vdu_profile_id
+ and vca.execution_environment_ref == endpoint.execution_environment_ref
+ )
+ if found:
+ break
+ return found
+
+ def _update_ee_relation_data_with_implicit_data(
+ self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
+ ):
+ ee_relation_data = safe_get_ee_relation(
+ nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
+ )
+ ee_relation_level = EELevel.get_level(ee_relation_data)
+ if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
+ "execution-environment-ref"
+ ]:
+ vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
+ vnfd_id = vnf_profile["vnfd-id"]
+ db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
+ entity_id = (
+ vnfd_id
+ if ee_relation_level == EELevel.VNF
+ else ee_relation_data["vdu-profile-id"]
+ )
+ ee = get_juju_ee_ref(db_vnfd, entity_id)
+ if not ee:
+ raise Exception(
+ f"not execution environments found for ee_relation {ee_relation_data}"
+ )
+ ee_relation_data["execution-environment-ref"] = ee["id"]
+ return ee_relation_data
+
+ def _get_ns_relations(
+ self,
+ nsr_id: str,
+ nsd: Dict[str, Any],
+ vca: DeployedVCA,
+ cached_vnfds: Dict[str, Any],
+ ) -> List[Relation]:
+ relations = []
+ db_ns_relations = get_ns_configuration_relation_list(nsd)
+ for r in db_ns_relations:
+ provider_dict = None
+ requirer_dict = None
+ if all(key in r for key in ("provider", "requirer")):
+ provider_dict = r["provider"]
+ requirer_dict = r["requirer"]
+ elif "entities" in r:
+ provider_id = r["entities"][0]["id"]
+ provider_dict = {
+ "nsr-id": nsr_id,
+ "endpoint": r["entities"][0]["endpoint"],
+ }
+ if provider_id != nsd["id"]:
+ provider_dict["vnf-profile-id"] = provider_id
+ requirer_id = r["entities"][1]["id"]
+ requirer_dict = {
+ "nsr-id": nsr_id,
+ "endpoint": r["entities"][1]["endpoint"],
+ }
+ if requirer_id != nsd["id"]:
+ requirer_dict["vnf-profile-id"] = requirer_id
+ else:
+ raise Exception(
+ "provider/requirer or entities must be included in the relation."
+ )
+ relation_provider = self._update_ee_relation_data_with_implicit_data(
+ nsr_id, nsd, provider_dict, cached_vnfds
+ )
+ relation_requirer = self._update_ee_relation_data_with_implicit_data(
+ nsr_id, nsd, requirer_dict, cached_vnfds
+ )
+ provider = EERelation(relation_provider)
+ requirer = EERelation(relation_requirer)
+ relation = Relation(r["name"], provider, requirer)
+ vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
+ if vca_in_relation:
+ relations.append(relation)
+ return relations
+
+ def _get_vnf_relations(
+ self,
+ nsr_id: str,
+ nsd: Dict[str, Any],
+ vca: DeployedVCA,
+ cached_vnfds: Dict[str, Any],
+ ) -> List[Relation]:
+ relations = []
+ vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
+ vnf_profile_id = vnf_profile["id"]
+ vnfd_id = vnf_profile["vnfd-id"]
+ db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
+ db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
+ for r in db_vnf_relations:
+ provider_dict = None
+ requirer_dict = None
+ if all(key in r for key in ("provider", "requirer")):
+ provider_dict = r["provider"]
+ requirer_dict = r["requirer"]
+ elif "entities" in r:
+ provider_id = r["entities"][0]["id"]
+ provider_dict = {
+ "nsr-id": nsr_id,
+ "vnf-profile-id": vnf_profile_id,
+ "endpoint": r["entities"][0]["endpoint"],
+ }
+ if provider_id != vnfd_id:
+ provider_dict["vdu-profile-id"] = provider_id
+ requirer_id = r["entities"][1]["id"]
+ requirer_dict = {
+ "nsr-id": nsr_id,
+ "vnf-profile-id": vnf_profile_id,
+ "endpoint": r["entities"][1]["endpoint"],
+ }
+ if requirer_id != vnfd_id:
+ requirer_dict["vdu-profile-id"] = requirer_id
+ else:
+ raise Exception(
+ "provider/requirer or entities must be included in the relation."
+ )
+ relation_provider = self._update_ee_relation_data_with_implicit_data(
+ nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
+ )
+ relation_requirer = self._update_ee_relation_data_with_implicit_data(
+ nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
+ )
+ provider = EERelation(relation_provider)
+ requirer = EERelation(relation_requirer)
+ relation = Relation(r["name"], provider, requirer)
+ vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
+ if vca_in_relation:
+ relations.append(relation)
+ return relations
+
+ def _get_kdu_resource_data(
+ self,
+ ee_relation: EERelation,
+ db_nsr: Dict[str, Any],
+ cached_vnfds: Dict[str, Any],
+ ) -> DeployedK8sResource:
+ nsd = get_nsd(db_nsr)
+ vnf_profiles = get_vnf_profiles(nsd)
+ vnfd_id = find_in_list(
+ vnf_profiles,
+ lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
+ )["vnfd-id"]
+ db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
+ kdu_resource_profile = get_kdu_resource_profile(
+ db_vnfd, ee_relation.kdu_resource_profile_id
+ )
+ kdu_name = kdu_resource_profile["kdu-name"]
+ deployed_kdu, _ = get_deployed_kdu(
+ db_nsr.get("_admin", ()).get("deployed", ()),
+ kdu_name,
+ ee_relation.vnf_profile_id,
+ )
+ deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
+ return deployed_kdu
+
+ def _get_deployed_component(
+ self,
+ ee_relation: EERelation,
+ db_nsr: Dict[str, Any],
+ cached_vnfds: Dict[str, Any],
+ ) -> DeployedComponent:
+ nsr_id = db_nsr["_id"]
+ deployed_component = None
+ ee_level = EELevel.get_level(ee_relation)
+ if ee_level == EELevel.NS:
+ vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
+ if vca:
+ deployed_component = DeployedVCA(nsr_id, vca)
+ elif ee_level == EELevel.VNF:
+ vca = get_deployed_vca(
+ db_nsr,
+ {
+ "vdu_id": None,
+ "member-vnf-index": ee_relation.vnf_profile_id,
+ "ee_descriptor_id": ee_relation.execution_environment_ref,
+ },
+ )
+ if vca:
+ deployed_component = DeployedVCA(nsr_id, vca)
+ elif ee_level == EELevel.VDU:
+ vca = get_deployed_vca(
+ db_nsr,
+ {
+ "vdu_id": ee_relation.vdu_profile_id,
+ "member-vnf-index": ee_relation.vnf_profile_id,
+ "ee_descriptor_id": ee_relation.execution_environment_ref,
+ },
+ )
+ if vca:
+ deployed_component = DeployedVCA(nsr_id, vca)
+ elif ee_level == EELevel.KDU:
+ kdu_resource_data = self._get_kdu_resource_data(
+ ee_relation, db_nsr, cached_vnfds
+ )
+ if kdu_resource_data:
+ deployed_component = DeployedK8sResource(kdu_resource_data)
+ return deployed_component
+
    async def _add_relation(
        self,
        relation: Relation,
        vca_type: str,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
        cached_vnfrs: Dict[str, Any],
    ) -> bool:
        """Try to establish one relation between its two deployed endpoints.

        :param relation: relation (provider + requirer endpoints) to add
        :param vca_type: key into self.vca_map selecting the VCA connector
        :param db_nsr: current NS record (re-read by the caller on each retry)
        :param cached_vnfds: per-operation VNFD cache
        :param cached_vnfrs: per-operation VNFR cache
        :return: True when the relation was added; False when either endpoint is
            not yet deployed or its configuration software is not installed, so
            the caller should retry later.
        """
        deployed_provider = self._get_deployed_component(
            relation.provider, db_nsr, cached_vnfds
        )
        deployed_requirer = self._get_deployed_component(
            relation.requirer, db_nsr, cached_vnfds
        )
        # both peers must exist and have their config software ready before the
        # relation can be established
        if (
            deployed_provider
            and deployed_requirer
            and deployed_provider.config_sw_installed
            and deployed_requirer.config_sw_installed
        ):
            # NS-level endpoints have no vnf_profile_id, hence no VNFR to read
            provider_db_vnfr = (
                self._get_vnfr(
                    relation.provider.nsr_id,
                    relation.provider.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.provider.vnf_profile_id
                else None
            )
            requirer_db_vnfr = (
                self._get_vnfr(
                    relation.requirer.nsr_id,
                    relation.requirer.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.requirer.vnf_profile_id
                else None
            )
            provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
            requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
            provider_relation_endpoint = RelationEndpoint(
                deployed_provider.ee_id,
                provider_vca_id,
                relation.provider.endpoint,
            )
            requirer_relation_endpoint = RelationEndpoint(
                deployed_requirer.ee_id,
                requirer_vca_id,
                relation.requirer.endpoint,
            )
            await self.vca_map[vca_type].add_relation(
                provider=provider_relation_endpoint,
                requirer=requirer_relation_endpoint,
            )
            # remove entry from relations list
            return True
        return False
+
+ async def _add_vca_relations(
+ self,
+ logging_text,
+ nsr_id,
+ vca_type: str,
+ vca_index: int,
+ timeout: int = 3600,
+ ) -> bool:
+
+ # steps:
+ # 1. find all relations for this VCA
+ # 2. wait for other peers related
+ # 3. add relations
+
+ try:
+ # STEP 1: find all relations for this VCA
+
+ # read nsr record
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ nsd = get_nsd(db_nsr)
+
+ # this VCA data
+ deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
+ my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
+
+ cached_vnfds = {}
+ cached_vnfrs = {}
+ relations = []
+ relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
+ relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
+
+ # if no relations, terminate
+ if not relations:
+ self.logger.debug(logging_text + " No relations")
+ return True
+
+ self.logger.debug(logging_text + " adding relations {}".format(relations))
+
+ # add all relations
+ start = time()
+ while True:
+ # check timeout
+ now = time()
+ if now - start >= timeout:
+ self.logger.error(logging_text + " : timeout adding relations")
+ return False
+
+ # reload nsr from database (we need to update record: _admin.deployed.VCA)
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+
+ # for each relation, find the VCA's related
+ for relation in relations.copy():
+ added = await self._add_relation(
+ relation,
+ vca_type,
+ db_nsr,
+ cached_vnfds,
+ cached_vnfrs,
+ )
+ if added:
+ relations.remove(relation)
+
+ if not relations:
+ self.logger.debug("Relations added")
+ break
+ await asyncio.sleep(5.0)
+
+ return True
+
+ except Exception as e:
+ self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
+ return False
+
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install one KDU on its k8s cluster and update the nsr/vnfr records.

        Generates (or reuses) the kdu-instance name, installs the KDU through
        the matching k8s connector, records the exposed services and management
        IP in the VNFR, and finally runs the KDU's initial config primitives
        (if defined and not handled by a juju EE).

        :param nsr_id: NS record id
        :param nsr_db_path: path of this KDU inside the nsr record
            ("_admin.deployed.K8s.<index>")
        :param vnfr_data: VNF record the KDU belongs to
        :param kdu_index: index of the kdur entry inside vnfr_data["kdur"]
        :param kdud: KDU descriptor (from the VNFD)
        :param vnfd: VNF descriptor
        :param k8s_instance_info: target data: k8scluster-type/uuid, kdu-model,
            kdu-name, namespace, kdu-deployment-name
        :param k8params: instantiation parameters for the KDU
        :param timeout: timeout in seconds for the install and for each primitive
        :param vca_id: VCA id, passed through to the k8s connector
        :return: the kdu_instance name
        :raises Exception: re-raises any failure after recording the error
            status in the nsr and vnfr records
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # honour an explicit deployment name; otherwise let the connector
            # generate a unique instance name
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        # services are matched by name prefix against the
                        # descriptor's mgmt-service name
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            # mark the kdur READY and persist all collected data at once
            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            # initial primitives are executed here only when there is no juju
            # EE in charge of the KDU's configuration
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                # primitives run in ascending "seq" order
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
+
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch one async install task per KDU present in the VNF records.

        For every kdur in every VNFR: resolve the kdu model (helm chart or juju
        bundle, possibly a packaged artifact), make sure the target k8s cluster
        is initialized and its repos synchronized, record the KDU entry at
        db_nsr._admin.deployed.K8s, and spawn an _install_kdu task registered
        in task_instantiation_info.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param nslcmop_id: current operation id (for task registration)
        :param db_vnfrs: dict of VNF records, keyed by member index
        :param db_vnfds: list of VNF descriptors
        :param task_instantiation_info: dict task -> description, filled here
        :raises LcmException: on any deployment preparation error
        """
        # Launch kdus if present in the descriptor

        # cache of cluster-id -> connector-internal uuid, per cluster type
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            # Return the connector-internal id of the cluster, waiting for any
            # ongoing k8scluster task and lazily initializing helm-v3 clusters
            # created before v3 support existed.
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    # only helm-based clusters need it, and only once per cluster
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    # install runs asynchronously; completion is tracked through
                    # task_instantiation_info by the caller
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
+
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Launch one instantiate_N2VC task per execution environment of a descriptor.

        For each juju/helm EE item in *descriptor_config*, find (or create) the
        matching slot in db_nsr._admin.deployed.VCA and spawn an asynchronous
        instantiate_N2VC task, registering it in task_instantiation_info.

        :param descriptor_config: configuration section of the NSD/VNFD/VDU/KDU
            holding the execution-environment-list (or a bare "juju" ns charm)
        :param task_instantiation_info: dict task -> description, filled here
        Remaining parameters are passed through to instantiate_N2VC.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # determine the VCA type from the EE item: juju charm (proxy,
            # native or k8s-proxy) or helm chart (v2/v3)
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # look for an already existing VCA slot matching this EE; the
            # for/else reuses vca_index when found, or appends a new slot
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
+
+ @staticmethod
+ def _create_nslcmop(nsr_id, operation, params):
+ """
+ Creates a ns-lcm-opp content to be stored at database.
+ :param nsr_id: internal id of the instance
+ :param operation: instantiate, terminate, scale, action, ...
+ :param params: user parameters for the operation
+ :return: dictionary following SOL005 format
+ """
+ # Raise exception if invalid arguments
+ if not (nsr_id and operation and params):
+ raise LcmException(
+ "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
+ )
+ now = time()
+ _id = str(uuid4())
+ nslcmop = {
+ "id": _id,
+ "_id": _id,
+ # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
+ "operationState": "PROCESSING",
+ "statusEnteredTime": now,
+ "nsInstanceId": nsr_id,
+ "lcmOperationType": operation,
+ "startTime": now,
+ "isAutomaticInvocation": False,
+ "operationParams": params,
+ "isCancelPending": False,
+ "links": {
+ "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
+ "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
+ },
+ }
+ return nslcmop
+
+ def _format_additional_params(self, params):
+ params = params or {}
+ for key, value in params.items():
+ if str(value).startswith("!!yaml "):
+ params[key] = yaml.safe_load(value[7:])
+ return params
+
+ def _get_terminate_primitive_params(self, seq, vnf_index):
+ primitive = seq.get("name")
+ primitive_params = {}
+ params = {
+ "member_vnf_index": vnf_index,
+ "primitive": primitive,
+ "primitive_params": primitive_params,
+ }
+ desc_params = {}
+ return self._map_primitive_params(seq, params, desc_params)
+
+ # sub-operations
+
+ def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
+ op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
+ if op.get("operationState") == "COMPLETED":
+ # b. Skip sub-operation
+ # _ns_execute_primitive() or RO.create_action() will NOT be executed
+ return self.SUBOPERATION_STATUS_SKIP
+ else:
+ # c. retry executing sub-operation
+ # The sub-operation exists, and operationState != 'COMPLETED'
+ # Update operationState = 'PROCESSING' to indicate a retry.
+ operationState = "PROCESSING"
+ detailed_status = "In progress"
+ self._update_suboperation_status(
+ db_nslcmop, op_index, operationState, detailed_status
+ )
+ # Return the sub-operation index
+ # _ns_execute_primitive() or RO.create_action() will be called from scale()
+ # with arguments extracted from the sub-operation
+ return op_index
+
+ # Find a sub-operation where all keys in a matching dictionary must match
+ # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
+ def _find_suboperation(self, db_nslcmop, match):
+ if db_nslcmop and match:
+ op_list = db_nslcmop.get("_admin", {}).get("operations", [])
+ for i, op in enumerate(op_list):
+ if all(op.get(k) == match[k] for k in match):
+ return i
+ return self.SUBOPERATION_STATUS_NOT_FOUND
+
+ # Update status for a sub-operation given its index
+ def _update_suboperation_status(
+ self, db_nslcmop, op_index, operationState, detailed_status
+ ):
+ # Update DB for HA tasks
+ q_filter = {"_id": db_nslcmop["_id"]}
+ update_dict = {
+ "_admin.operations.{}.operationState".format(op_index): operationState,
+ "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
+ }
+ self.db.set_one(
+ "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
+ )
+
+ # Add sub-operation, return the index of the added sub-operation
+ # Optionally, set operationState, detailed-status, and operationType
+ # Status and type are currently set for 'scale' sub-operations:
+ # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
+ # 'detailed-status' : status message
+ # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
+ # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
+ def _add_suboperation(
+ self,
+ db_nslcmop,
+ vnf_index,
+ vdu_id,
+ vdu_count_index,
+ vdu_name,
+ primitive,
+ mapped_primitive_params,
+ operationState=None,
+ detailed_status=None,
+ operationType=None,
+ RO_nsr_id=None,
+ RO_scaling_info=None,
+ ):
+ if not db_nslcmop:
+ return self.SUBOPERATION_STATUS_NOT_FOUND
+ # Get the "_admin.operations" list, if it exists
+ db_nslcmop_admin = db_nslcmop.get("_admin", {})
+ op_list = db_nslcmop_admin.get("operations")
+ # Create or append to the "_admin.operations" list
+ new_op = {
+ "member_vnf_index": vnf_index,
+ "vdu_id": vdu_id,
+ "vdu_count_index": vdu_count_index,
+ "primitive": primitive,
+ "primitive_params": mapped_primitive_params,
+ }
+ if operationState:
+ new_op["operationState"] = operationState
+ if detailed_status:
+ new_op["detailed-status"] = detailed_status
+ if operationType:
+ new_op["lcmOperationType"] = operationType
+ if RO_nsr_id:
+ new_op["RO_nsr_id"] = RO_nsr_id
+ if RO_scaling_info:
+ new_op["RO_scaling_info"] = RO_scaling_info
+ if not op_list:
+ # No existing operations, create key 'operations' with current operation as first list element
+ db_nslcmop_admin.update({"operations": [new_op]})
+ op_list = db_nslcmop_admin.get("operations")
+ else:
+ # Existing operations, append operation to list
+ op_list.append(new_op)
+
+ db_nslcmop_update = {"_admin.operations": op_list}
+ self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
+ op_index = len(op_list) - 1
+ return op_index
+
+ # Helper methods for scale() sub-operations
+
+ # pre-scale/post-scale:
+ # Check for 3 different cases:
+ # a. New: First time execution, return SUBOPERATION_STATUS_NEW
+ # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
+ # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
+    def _check_or_add_scale_suboperation(
+        self,
+        db_nslcmop,
+        vnf_index,
+        vnf_config_primitive,
+        primitive_params,
+        operationType,
+        RO_nsr_id=None,
+        RO_scaling_info=None,
+    ):
+        """
+        Check whether a scale sub-operation already exists, adding it if not.
+
+        :param db_nslcmop: database content of the current nslcmop operation
+        :param vnf_index: member-vnf-index the sub-operation belongs to
+        :param vnf_config_primitive: primitive name (pre/post-scale case)
+        :param primitive_params: primitive parameters (pre/post-scale case)
+        :param operationType: operation type; overridden to "SCALE-RO" when
+            both RO_nsr_id and RO_scaling_info are provided
+        :return: SUBOPERATION_STATUS_NEW when a new sub-operation was added,
+            SUBOPERATION_STATUS_SKIP when an existing one already completed, or
+            the op_index of an existing non-completed sub-operation to retry
+        """
+        # Find this sub-operation
+        if RO_nsr_id and RO_scaling_info:
+            # RO scaling case: match on the RO ids instead of primitive data
+            operationType = "SCALE-RO"
+            match = {
+                "member_vnf_index": vnf_index,
+                "RO_nsr_id": RO_nsr_id,
+                "RO_scaling_info": RO_scaling_info,
+            }
+        else:
+            match = {
+                "member_vnf_index": vnf_index,
+                "primitive": vnf_config_primitive,
+                "primitive_params": primitive_params,
+                "lcmOperationType": operationType,
+            }
+        op_index = self._find_suboperation(db_nslcmop, match)
+        if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
+            # a. New sub-operation
+            # The sub-operation does not exist, add it.
+            # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
+            # The following parameters are set to None for all kind of scaling:
+            vdu_id = None
+            vdu_count_index = None
+            vdu_name = None
+            if RO_nsr_id and RO_scaling_info:
+                vnf_config_primitive = None
+                primitive_params = None
+            else:
+                RO_nsr_id = None
+                RO_scaling_info = None
+            # Initial status for sub-operation
+            operationState = "PROCESSING"
+            detailed_status = "In progress"
+            # Add sub-operation for pre/post-scaling (zero or more operations)
+            self._add_suboperation(
+                db_nslcmop,
+                vnf_index,
+                vdu_id,
+                vdu_count_index,
+                vdu_name,
+                vnf_config_primitive,
+                primitive_params,
+                operationState,
+                detailed_status,
+                operationType,
+                RO_nsr_id,
+                RO_scaling_info,
+            )
+            return self.SUBOPERATION_STATUS_NEW
+        else:
+            # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
+            # or op_index (operationState != 'COMPLETED')
+            return self._retry_or_skip_suboperation(db_nslcmop, op_index)
+
+ # Function to return execution_environment id
+
+    def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
+        """
+        Return the ee_id of the deployed VCA matching vnf_index and vdu_id.
+
+        Returns None implicitly when no matching record is found.
+        """
+        # TODO vdu_index_count
+        for vca in vca_deployed_list:
+            if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
+                return vca["ee_id"]
+
+    async def destroy_N2VC(
+        self,
+        logging_text,
+        db_nslcmop,
+        vca_deployed,
+        config_descriptor,
+        vca_index,
+        destroy_ee=True,
+        exec_primitives=True,
+        scaling_in=False,
+        vca_id: str = None,
+    ):
+        """
+        Execute the terminate primitives and destroy the execution environment (unless destroy_ee=False).
+
+        :param logging_text: prefix used on every log message
+        :param db_nslcmop: database content of the current nslcmop operation
+        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
+        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
+        :param vca_index: index in the database _admin.deployed.VCA
+        :param destroy_ee: False to not destroy it, because all of them will be destroyed at once
+        :param exec_primitives: False to not execute terminate primitives, because the config is not completed or has
+            not executed properly
+        :param scaling_in: True destroys the application, False destroys the model
+        :param vca_id: VCA identifier passed through to N2VC calls, if any
+        :return: None or exception
+        """
+
+        self.logger.debug(
+            logging_text
+            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
+                vca_index, vca_deployed, config_descriptor, destroy_ee
+            )
+        )
+
+        # default value for backward compatibility with records lacking "type"
+        vca_type = vca_deployed.get("type", "lxc_proxy_charm")
+
+        # execute terminate_primitives
+        if exec_primitives:
+            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
+                config_descriptor.get("terminate-config-primitive"),
+                vca_deployed.get("ee_descriptor_id"),
+            )
+            vdu_id = vca_deployed.get("vdu_id")
+            vdu_count_index = vca_deployed.get("vdu_count_index")
+            vdu_name = vca_deployed.get("vdu_name")
+            vnf_index = vca_deployed.get("member-vnf-index")
+            if terminate_primitives and vca_deployed.get("needed_terminate"):
+                for seq in terminate_primitives:
+                    # For each sequence in list, get primitive and call _ns_execute_primitive()
+                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
+                        vnf_index, seq.get("name")
+                    )
+                    self.logger.debug(logging_text + step)
+                    # Create the primitive for each sequence, i.e. "primitive": "touch"
+                    primitive = seq.get("name")
+                    mapped_primitive_params = self._get_terminate_primitive_params(
+                        seq, vnf_index
+                    )
+
+                    # Add sub-operation
+                    self._add_suboperation(
+                        db_nslcmop,
+                        vnf_index,
+                        vdu_id,
+                        vdu_count_index,
+                        vdu_name,
+                        primitive,
+                        mapped_primitive_params,
+                    )
+                    # Sub-operations: Call _ns_execute_primitive() instead of action()
+                    try:
+                        result, result_detail = await self._ns_execute_primitive(
+                            vca_deployed["ee_id"],
+                            primitive,
+                            mapped_primitive_params,
+                            vca_type=vca_type,
+                            vca_id=vca_id,
+                        )
+                    except LcmException:
+                        # this happens when VCA is not deployed. In this case it is not needed to terminate
+                        continue
+                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
+                    if result not in result_ok:
+                        raise LcmException(
+                            "terminate_primitive {} for vnf_member_index={} fails with "
+                            "error {}".format(seq.get("name"), vnf_index, result_detail)
+                        )
+                # set that this VCA do not need terminated
+                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
+                    vca_index
+                )
+                self.update_db_2(
+                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
+                )
+
+        # Delete Prometheus Jobs if any
+        # This uses NSR_ID, so it will destroy any jobs under this index
+        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})
+
+        if destroy_ee:
+            await self.vca_map[vca_type].delete_execution_environment(
+                vca_deployed["ee_id"],
+                scaling_in=scaling_in,
+                vca_type=vca_type,
+                vca_id=vca_id,
+            )
+
+    async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
+        """
+        Delete every execution environment of this NS at once by removing its whole N2VC namespace.
+
+        Writes TERMINATING before and DELETED after to all config statuses;
+        an already-missing namespace (N2VCNotFound) is tolerated.
+        """
+        self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
+        # the N2VC namespace for a NS is "." followed by the nsr id
+        namespace = "." + db_nsr["_id"]
+        try:
+            await self.n2vc.delete_namespace(
+                namespace=namespace,
+                total_timeout=self.timeout_charm_delete,
+                vca_id=vca_id,
+            )
+        except N2VCNotFound: # already deleted. Skip
+            pass
+        self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
+
+    async def _terminate_RO(
+        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
+    ):
+        """
+        Terminates a deployment from RO
+        :param logging_text:
+        :param nsr_deployed: db_nsr._admin.deployed
+        :param nsr_id:
+        :param nslcmop_id:
+        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
+        this method will update only the index 2, but it will write on database the concatenated content of the list
+        :return:
+        """
+        db_nsr_update = {}
+        failed_detail = []
+        ro_nsr_id = ro_delete_action = None
+        if nsr_deployed and nsr_deployed.get("RO"):
+            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
+            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
+        # Step 1: delete the ns instance from the VIM and wait for completion
+        try:
+            if ro_nsr_id:
+                stage[2] = "Deleting ns from VIM."
+                db_nsr_update["detailed-status"] = " ".join(stage)
+                self._write_op_status(nslcmop_id, stage)
+                self.logger.debug(logging_text + stage[2])
+                self.update_db_2("nsrs", nsr_id, db_nsr_update)
+                self._write_op_status(nslcmop_id, stage)
+                desc = await self.RO.delete("ns", ro_nsr_id)
+                ro_delete_action = desc["action_id"]
+                db_nsr_update[
+                    "_admin.deployed.RO.nsr_delete_action_id"
+                ] = ro_delete_action
+                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
+                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
+            if ro_delete_action:
+                # wait until NS is deleted from VIM
+                stage[2] = "Waiting ns deleted from VIM."
+                detailed_status_old = None
+                self.logger.debug(
+                    logging_text
+                    + stage[2]
+                    + " RO_id={} ro_delete_action={}".format(
+                        ro_nsr_id, ro_delete_action
+                    )
+                )
+                self.update_db_2("nsrs", nsr_id, db_nsr_update)
+                self._write_op_status(nslcmop_id, stage)
+
+                # poll the delete action status every 5 seconds up to 20 minutes
+                delete_timeout = 20 * 60  # 20 minutes
+                while delete_timeout > 0:
+                    desc = await self.RO.show(
+                        "ns",
+                        item_id_name=ro_nsr_id,
+                        extra_item="action",
+                        extra_item_id=ro_delete_action,
+                    )
+
+                    # deploymentStatus
+                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
+
+                    ns_status, ns_status_info = self.RO.check_action_status(desc)
+                    if ns_status == "ERROR":
+                        raise ROclient.ROClientException(ns_status_info)
+                    elif ns_status == "BUILD":
+                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
+                    elif ns_status == "ACTIVE":
+                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
+                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
+                        break
+                    else:
+                        assert (
+                            False
+                        ), "ROclient.check_action_status returns unknown {}".format(
+                            ns_status
+                        )
+                    # write status only when it changed, to avoid redundant DB writes
+                    if stage[2] != detailed_status_old:
+                        detailed_status_old = stage[2]
+                        db_nsr_update["detailed-status"] = " ".join(stage)
+                        self._write_op_status(nslcmop_id, stage)
+                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
+                    await asyncio.sleep(5, loop=self.loop)
+                    delete_timeout -= 5
+                else:  # delete_timeout <= 0:
+                    raise ROclient.ROClientException(
+                        "Timeout waiting ns deleted from VIM"
+                    )
+
+        except Exception as e:
+            self.update_db_2("nsrs", nsr_id, db_nsr_update)
+            if (
+                isinstance(e, ROclient.ROClientException) and e.http_code == 404
+            ):  # not found
+                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
+                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
+                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
+                self.logger.debug(
+                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
+                )
+            elif (
+                isinstance(e, ROclient.ROClientException) and e.http_code == 409
+            ):  # conflict
+                failed_detail.append("delete conflict: {}".format(e))
+                self.logger.debug(
+                    logging_text
+                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
+                )
+            else:
+                failed_detail.append("delete error: {}".format(e))
+                self.logger.error(
+                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
+                )
+
+        # Step 2: delete the nsd from RO (only if step 1 succeeded)
+        # Delete nsd
+        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
+            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
+            try:
+                stage[2] = "Deleting nsd from RO."
+                db_nsr_update["detailed-status"] = " ".join(stage)
+                self.update_db_2("nsrs", nsr_id, db_nsr_update)
+                self._write_op_status(nslcmop_id, stage)
+                await self.RO.delete("nsd", ro_nsd_id)
+                self.logger.debug(
+                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
+                )
+                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
+            except Exception as e:
+                if (
+                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
+                ):  # not found
+                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
+                    self.logger.debug(
+                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
+                    )
+                elif (
+                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
+                ):  # conflict
+                    failed_detail.append(
+                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
+                    )
+                    self.logger.debug(logging_text + failed_detail[-1])
+                else:
+                    failed_detail.append(
+                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
+                    )
+                    self.logger.error(logging_text + failed_detail[-1])
+
+        # Step 3: delete every deployed vnfd from RO (only if no previous failure)
+        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
+            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
+                if not vnf_deployed or not vnf_deployed["id"]:
+                    continue
+                try:
+                    ro_vnfd_id = vnf_deployed["id"]
+                    stage[
+                        2
+                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
+                        vnf_deployed["member-vnf-index"], ro_vnfd_id
+                    )
+                    db_nsr_update["detailed-status"] = " ".join(stage)
+                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
+                    self._write_op_status(nslcmop_id, stage)
+                    await self.RO.delete("vnfd", ro_vnfd_id)
+                    self.logger.debug(
+                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
+                    )
+                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
+                except Exception as e:
+                    if (
+                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
+                    ):  # not found
+                        db_nsr_update[
+                            "_admin.deployed.RO.vnfd.{}.id".format(index)
+                        ] = None
+                        self.logger.debug(
+                            logging_text
+                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
+                        )
+                    elif (
+                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
+                    ):  # conflict
+                        failed_detail.append(
+                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
+                        )
+                        self.logger.debug(logging_text + failed_detail[-1])
+                    else:
+                        failed_detail.append(
+                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
+                        )
+                        self.logger.error(logging_text + failed_detail[-1])
+
+        if failed_detail:
+            stage[2] = "Error deleting from VIM"
+        else:
+            stage[2] = "Deleted from VIM"
+        db_nsr_update["detailed-status"] = " ".join(stage)
+        self.update_db_2("nsrs", nsr_id, db_nsr_update)
+        self._write_op_status(nslcmop_id, stage)
+
+        if failed_detail:
+            raise LcmException("; ".join(failed_detail))
+
+    async def terminate(self, nsr_id, nslcmop_id):
+        """
+        Terminate a NS instance (HA-locked nslcmop task entry point).
+
+        Stage 1 prepares the task, stage 2 runs the per-VCA terminate
+        primitives, stage 3 deletes all execution environments, KDUs and the
+        RO/VIM deployment. The finally block consolidates errors, updates the
+        nsrs/nslcmops records and publishes a kafka "terminated" message.
+        """
+        # Try to lock HA task here
+        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
+        if not task_is_locked_by_me:
+            return
+
+        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
+        self.logger.debug(logging_text + "Enter")
+        timeout_ns_terminate = self.timeout_ns_terminate
+        db_nsr = None
+        db_nslcmop = None
+        operation_params = None
+        exc = None
+        error_list = []  # annotates all failed error messages
+        db_nslcmop_update = {}
+        autoremove = False  # autoremove after terminated
+        tasks_dict_info = {}
+        db_nsr_update = {}
+        stage = [
+            "Stage 1/3: Preparing task.",
+            "Waiting for previous operations to terminate.",
+            "",
+        ]
+        # ^ contains [stage, step, VIM-status]
+        try:
+            # wait for any previous tasks in process
+            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
+
+            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
+            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+            operation_params = db_nslcmop.get("operationParams") or {}
+            if operation_params.get("timeout_ns_terminate"):
+                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
+            stage[1] = "Getting nsr={} from db.".format(nsr_id)
+            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+
+            db_nsr_update["operational-status"] = "terminating"
+            db_nsr_update["config-status"] = "terminating"
+            self._write_ns_status(
+                nsr_id=nsr_id,
+                ns_state="TERMINATING",
+                current_operation="TERMINATING",
+                current_operation_id=nslcmop_id,
+                other_update=db_nsr_update,
+            )
+            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
+            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
+            # nothing was deployed: go straight to the finally block
+            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
+                return
+
+            stage[1] = "Getting vnf descriptors from db."
+            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
+            db_vnfrs_dict = {
+                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
+            }
+            db_vnfds_from_id = {}
+            db_vnfds_from_member_index = {}
+            # Loop over VNFRs
+            for vnfr in db_vnfrs_list:
+                vnfd_id = vnfr["vnfd-id"]
+                # db_vnfds_from_id caches each vnfd so it is fetched only once
+                if vnfd_id not in db_vnfds_from_id:
+                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+                    db_vnfds_from_id[vnfd_id] = vnfd
+                db_vnfds_from_member_index[
+                    vnfr["member-vnf-index-ref"]
+                ] = db_vnfds_from_id[vnfd_id]
+
+            # Destroy individual execution environments when there are terminating primitives.
+            # Rest of EE will be deleted at once
+            # TODO - check before calling _destroy_N2VC
+            # if not operation_params.get("skip_terminate_primitives"):#
+            # or not vca.get("needed_terminate"):
+            stage[0] = "Stage 2/3 execute terminating primitives."
+            self.logger.debug(logging_text + stage[0])
+            stage[1] = "Looking execution environment that needs terminate."
+            self.logger.debug(logging_text + stage[1])
+
+            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
+                config_descriptor = None
+                vca_member_vnf_index = vca.get("member-vnf-index")
+                vca_id = self.get_vca_id(
+                    db_vnfrs_dict.get(vca_member_vnf_index)
+                    if vca_member_vnf_index
+                    else None,
+                    db_nsr,
+                )
+                if not vca or not vca.get("ee_id"):
+                    continue
+                # pick the configuration descriptor matching the VCA scope:
+                # ns-level, vdu-level, kdu-level or vnf-level
+                if not vca.get("member-vnf-index"):
+                    # ns
+                    config_descriptor = db_nsr.get("ns-configuration")
+                elif vca.get("vdu_id"):
+                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
+                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
+                elif vca.get("kdu_name"):
+                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
+                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
+                else:
+                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
+                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
+                vca_type = vca.get("type")
+                exec_terminate_primitives = not operation_params.get(
+                    "skip_terminate_primitives"
+                ) and vca.get("needed_terminate")
+                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
+                # pending native charms
+                destroy_ee = (
+                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
+                )
+                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
+                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
+                task = asyncio.ensure_future(
+                    self.destroy_N2VC(
+                        logging_text,
+                        db_nslcmop,
+                        vca,
+                        config_descriptor,
+                        vca_index,
+                        destroy_ee,
+                        exec_terminate_primitives,
+                        vca_id=vca_id,
+                    )
+                )
+                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
+
+            # wait for pending tasks of terminate primitives
+            if tasks_dict_info:
+                self.logger.debug(
+                    logging_text
+                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
+                )
+                error_list = await self._wait_for_tasks(
+                    logging_text,
+                    tasks_dict_info,
+                    min(self.timeout_charm_delete, timeout_ns_terminate),
+                    stage,
+                    nslcmop_id,
+                )
+                tasks_dict_info.clear()
+                if error_list:
+                    return  # raise LcmException("; ".join(error_list))
+
+            # remove All execution environments at once
+            stage[0] = "Stage 3/3 delete all."
+
+            if nsr_deployed.get("VCA"):
+                stage[1] = "Deleting all execution environments."
+                self.logger.debug(logging_text + stage[1])
+                vca_id = self.get_vca_id({}, db_nsr)
+                task_delete_ee = asyncio.ensure_future(
+                    asyncio.wait_for(
+                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
+                        timeout=self.timeout_charm_delete,
+                    )
+                )
+                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
+                tasks_dict_info[task_delete_ee] = "Terminating all VCA"
+
+            # Delete from k8scluster
+            stage[1] = "Deleting KDUs."
+            self.logger.debug(logging_text + stage[1])
+            # print(nsr_deployed)
+            for kdu in get_iterable(nsr_deployed, "K8s"):
+                if not kdu or not kdu.get("kdu-instance"):
+                    continue
+                kdu_instance = kdu.get("kdu-instance")
+                if kdu.get("k8scluster-type") in self.k8scluster_map:
+                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
+                    vca_id = self.get_vca_id({}, db_nsr)
+                    task_delete_kdu_instance = asyncio.ensure_future(
+                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
+                            cluster_uuid=kdu.get("k8scluster-uuid"),
+                            kdu_instance=kdu_instance,
+                            vca_id=vca_id,
+                            namespace=kdu.get("namespace"),
+                        )
+                    )
+                else:
+                    self.logger.error(
+                        logging_text
+                        + "Unknown k8s deployment type {}".format(
+                            kdu.get("k8scluster-type")
+                        )
+                    )
+                    continue
+                tasks_dict_info[
+                    task_delete_kdu_instance
+                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
+
+            # remove from RO
+            stage[1] = "Deleting ns from VIM."
+            if self.ng_ro:
+                task_delete_ro = asyncio.ensure_future(
+                    self._terminate_ng_ro(
+                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
+                    )
+                )
+            else:
+                task_delete_ro = asyncio.ensure_future(
+                    self._terminate_RO(
+                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
+                    )
+                )
+            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
+
+            # rest of staff will be done at finally
+
+        except (
+            ROclient.ROClientException,
+            DbException,
+            LcmException,
+            N2VCException,
+        ) as e:
+            self.logger.error(logging_text + "Exit Exception {}".format(e))
+            exc = e
+        except asyncio.CancelledError:
+            self.logger.error(
+                logging_text + "Cancelled Exception while '{}'".format(stage[1])
+            )
+            exc = "Operation was cancelled"
+        except Exception as e:
+            exc = traceback.format_exc()
+            self.logger.critical(
+                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
+                exc_info=True,
+            )
+        finally:
+            if exc:
+                error_list.append(str(exc))
+            try:
+                # wait for pending tasks
+                if tasks_dict_info:
+                    stage[1] = "Waiting for terminate pending tasks."
+                    self.logger.debug(logging_text + stage[1])
+                    error_list += await self._wait_for_tasks(
+                        logging_text,
+                        tasks_dict_info,
+                        timeout_ns_terminate,
+                        stage,
+                        nslcmop_id,
+                    )
+                stage[1] = stage[2] = ""
+            except asyncio.CancelledError:
+                error_list.append("Cancelled")
+                # TODO cancell all tasks
+            except Exception as exc:
+                # NOTE(review): `as exc` rebinds the outer `exc` variable;
+                # harmless here because `exc` is not read after this point
+                error_list.append(str(exc))
+            # update status at database
+            if error_list:
+                error_detail = "; ".join(error_list)
+                # self.logger.error(logging_text + error_detail)
+                error_description_nslcmop = "{} Detail: {}".format(
+                    stage[0], error_detail
+                )
+                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
+                    nslcmop_id, stage[0]
+                )
+
+                db_nsr_update["operational-status"] = "failed"
+                db_nsr_update["detailed-status"] = (
+                    error_description_nsr + " Detail: " + error_detail
+                )
+                db_nslcmop_update["detailed-status"] = error_detail
+                nslcmop_operation_state = "FAILED"
+                ns_state = "BROKEN"
+            else:
+                error_detail = None
+                error_description_nsr = error_description_nslcmop = None
+                ns_state = "NOT_INSTANTIATED"
+                db_nsr_update["operational-status"] = "terminated"
+                db_nsr_update["detailed-status"] = "Done"
+                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
+                db_nslcmop_update["detailed-status"] = "Done"
+                nslcmop_operation_state = "COMPLETED"
+
+            if db_nsr:
+                self._write_ns_status(
+                    nsr_id=nsr_id,
+                    ns_state=ns_state,
+                    current_operation="IDLE",
+                    current_operation_id=None,
+                    error_description=error_description_nsr,
+                    error_detail=error_detail,
+                    other_update=db_nsr_update,
+                )
+            self._write_op_status(
+                op_id=nslcmop_id,
+                stage="",
+                error_message=error_description_nslcmop,
+                operation_state=nslcmop_operation_state,
+                other_update=db_nslcmop_update,
+            )
+            if ns_state == "NOT_INSTANTIATED":
+                try:
+                    self.db.set_list(
+                        "vnfrs",
+                        {"nsr-id-ref": nsr_id},
+                        {"_admin.nsState": "NOT_INSTANTIATED"},
+                    )
+                except DbException as e:
+                    self.logger.warn(
+                        logging_text
+                        + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
+                            nsr_id, e
+                        )
+                    )
+            if operation_params:
+                autoremove = operation_params.get("autoremove", False)
+            if nslcmop_operation_state:
+                try:
+                    await self.msg.aiowrite(
+                        "ns",
+                        "terminated",
+                        {
+                            "nsr_id": nsr_id,
+                            "nslcmop_id": nslcmop_id,
+                            "operationState": nslcmop_operation_state,
+                            "autoremove": autoremove,
+                        },
+                        loop=self.loop,
+                    )
+                except Exception as e:
+                    self.logger.error(
+                        logging_text + "kafka_write notification Exception {}".format(e)
+                    )
+
+            self.logger.debug(logging_text + "Exit")
+            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
+
+    async def _wait_for_tasks(
+        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
+    ):
+        """
+        Wait for a set of asyncio tasks, collecting errors and updating operation status.
+
+        :param created_tasks_info: dict mapping each task to a human-readable description
+        :param timeout: global timeout in seconds for the whole set of tasks
+        :param stage: 3-item status list; index 1 is updated here with progress/errors
+        :param nslcmop_id: operation id whose status is updated on each progress step
+        :param nsr_id: when provided, errorDescription/errorDetail are also written to the nsr record
+        :return: list of error detail strings (empty when all tasks succeeded)
+        """
+        time_start = time()
+        error_detail_list = []
+        error_list = []
+        pending_tasks = list(created_tasks_info.keys())
+        num_tasks = len(pending_tasks)
+        num_done = 0
+        stage[1] = "{}/{}.".format(num_done, num_tasks)
+        self._write_op_status(nslcmop_id, stage)
+        while pending_tasks:
+            new_error = None
+            # remaining time of the global timeout
+            _timeout = timeout + time_start - time()
+            done, pending_tasks = await asyncio.wait(
+                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
+            )
+            num_done += len(done)
+            if not done:  # Timeout
+                for task in pending_tasks:
+                    new_error = created_tasks_info[task] + ": Timeout"
+                    error_detail_list.append(new_error)
+                    error_list.append(new_error)
+                break
+            for task in done:
+                if task.cancelled():
+                    exc = "Cancelled"
+                else:
+                    exc = task.exception()
+                if exc:
+                    if isinstance(exc, asyncio.TimeoutError):
+                        exc = "Timeout"
+                    new_error = created_tasks_info[task] + ": {}".format(exc)
+                    error_list.append(created_tasks_info[task])
+                    error_detail_list.append(new_error)
+                    # known exception types get a plain error log; anything
+                    # else also gets its traceback logged
+                    if isinstance(
+                        exc,
+                        (
+                            str,
+                            DbException,
+                            N2VCException,
+                            ROclient.ROClientException,
+                            LcmException,
+                            K8sException,
+                            NgRoException,
+                        ),
+                    ):
+                        self.logger.error(logging_text + new_error)
+                    else:
+                        exc_traceback = "".join(
+                            traceback.format_exception(None, exc, exc.__traceback__)
+                        )
+                        self.logger.error(
+                            logging_text
+                            + created_tasks_info[task]
+                            + " "
+                            + exc_traceback
+                        )
+                else:
+                    self.logger.debug(
+                        logging_text + created_tasks_info[task] + ": Done"
+                    )
+            stage[1] = "{}/{}.".format(num_done, num_tasks)
+            if new_error:
+                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
+                if nsr_id:  # update also nsr
+                    self.update_db_2(
+                        "nsrs",
+                        nsr_id,
+                        {
+                            "errorDescription": "Error at: " + ", ".join(error_list),
+                            "errorDetail": ". ".join(error_detail_list),
+                        },
+                    )
+            self._write_op_status(nslcmop_id, stage)
+        return error_detail_list
+
+    @staticmethod
+    def _map_primitive_params(primitive_desc, params, instantiation_params):
+        """
+        Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
+        the default-value is used. If it is between < > it looks for a value at instantiation_params.
+        :param primitive_desc: portion of VNFD/NSD that describes primitive
+        :param params: Params provided by user
+        :param instantiation_params: Instantiation params provided by user
+        :return: a dictionary with the calculated params
+        :raises LcmException: when a required parameter cannot be resolved or an INTEGER param is not convertible
+        """
+        calculated_params = {}
+        for parameter in primitive_desc.get("parameter", ()):
+            param_name = parameter["name"]
+            # precedence: user-provided value, then "value", then "default-value"
+            if param_name in params:
+                calculated_params[param_name] = params[param_name]
+            elif "default-value" in parameter or "value" in parameter:
+                if "value" in parameter:
+                    calculated_params[param_name] = parameter["value"]
+                else:
+                    calculated_params[param_name] = parameter["default-value"]
+                # a default of the form "<name>" is a reference to an instantiation param
+                if (
+                    isinstance(calculated_params[param_name], str)
+                    and calculated_params[param_name].startswith("<")
+                    and calculated_params[param_name].endswith(">")
+                ):
+                    if calculated_params[param_name][1:-1] in instantiation_params:
+                        calculated_params[param_name] = instantiation_params[
+                            calculated_params[param_name][1:-1]
+                        ]
+                    else:
+                        # NOTE(review): this message prints the "<...>" placeholder
+                        # rather than param_name — presumably intentional, verify
+                        raise LcmException(
+                            "Parameter {} needed to execute primitive {} not provided".format(
+                                calculated_params[param_name], primitive_desc["name"]
+                            )
+                        )
+            else:
+                raise LcmException(
+                    "Parameter {} needed to execute primitive {} not provided".format(
+                        param_name, primitive_desc["name"]
+                    )
+                )
+
+            # serialize containers as single-line YAML; unwrap explicit "!!yaml " strings
+            if isinstance(calculated_params[param_name], (dict, list, tuple)):
+                calculated_params[param_name] = yaml.safe_dump(
+                    calculated_params[param_name], default_flow_style=True, width=256
+                )
+            elif isinstance(calculated_params[param_name], str) and calculated_params[
+                param_name
+            ].startswith("!!yaml "):
+                calculated_params[param_name] = calculated_params[param_name][7:]
+            if parameter.get("data-type") == "INTEGER":
+                try:
+                    calculated_params[param_name] = int(calculated_params[param_name])
+                except ValueError:  # error converting string to int
+                    raise LcmException(
+                        "Parameter {} of primitive {} must be integer".format(
+                            param_name, primitive_desc["name"]
+                        )
+                    )
+            elif parameter.get("data-type") == "BOOLEAN":
+                # anything other than (case-insensitive) "false" is True
+                calculated_params[param_name] = not (
+                    (str(calculated_params[param_name])).lower() == "false"
+                )
+
+        # add always ns_config_info if primitive name is config
+        if primitive_desc["name"] == "config":
+            if "ns_config_info" in instantiation_params:
+                calculated_params["ns_config_info"] = instantiation_params[
+                    "ns_config_info"
+                ]
+        return calculated_params
+
+    def _look_for_deployed_vca(
+        self,
+        deployed_vca,
+        member_vnf_index,
+        vdu_id,
+        vdu_count_index,
+        kdu_name=None,
+        ee_descriptor_id=None,
+    ):
+        """
+        Find the deployed VCA record matching the given member/vdu/kdu/ee filters.
+
+        :param deployed_vca: list of records at db_nsr._admin.deployed.VCA
+        :param vdu_count_index: when None, the vdu_count_index filter is skipped
+        :return: tuple (ee_id, vca_type) of the matching record
+        :raises LcmException: when no record matches or the match has no ee_id
+        """
+        # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
+        for vca in deployed_vca:
+            if not vca:
+                continue
+            if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
+                continue
+            if (
+                vdu_count_index is not None
+                and vdu_count_index != vca["vdu_count_index"]
+            ):
+                continue
+            if kdu_name and kdu_name != vca["kdu_name"]:
+                continue
+            if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
+                continue
+            break
+        else:
+            # vca_deployed not found (loop completed without break)
+            raise LcmException(
+                "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
+                " is not deployed".format(
+                    member_vnf_index,
+                    vdu_id,
+                    vdu_count_index,
+                    kdu_name,
+                    ee_descriptor_id,
+                )
+            )
+        # get ee_id
+        ee_id = vca.get("ee_id")
+        vca_type = vca.get(
+            "type", "lxc_proxy_charm"
+        )  # default value for backward compatibility - proxy charm
+        if not ee_id:
+            raise LcmException(
+                "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
+                "execution environment".format(
+                    member_vnf_index, vdu_id, kdu_name, vdu_count_index
+                )
+            )
+        return ee_id, vca_type
+
+    async def _ns_execute_primitive(
+        self,
+        ee_id,
+        primitive,
+        primitive_params,
+        retries=0,
+        retries_interval=30,
+        timeout=None,
+        vca_type=None,
+        db_dict=None,
+        vca_id: str = None,
+    ) -> (str, str):
+        """
+        Execute a primitive on the given execution environment, retrying on failure.
+
+        :param ee_id: execution environment id where the primitive runs
+        :param primitive: primitive name; for "config" the params are wrapped under a "params" key
+        :param retries: number of extra attempts after a failed execution
+        :param retries_interval: seconds to wait between attempts
+        :param timeout: per-attempt timeout; defaults to self.timeout_primitive
+        :param vca_type: key of self.vca_map; defaults to "lxc_proxy_charm"
+        :return: tuple (status, detail): ("COMPLETED", output) on success,
+            ("FAILED", error) when all attempts fail, ("FAIL", error) on an
+            unexpected exception. NOTE(review): the "FAILED"/"FAIL" mismatch
+            looks unintentional — callers treat both as failure; verify before
+            unifying.
+        """
+        try:
+            if primitive == "config":
+                primitive_params = {"params": primitive_params}
+
+            vca_type = vca_type or "lxc_proxy_charm"
+
+            while retries >= 0:
+                try:
+                    output = await asyncio.wait_for(
+                        self.vca_map[vca_type].exec_primitive(
+                            ee_id=ee_id,
+                            primitive_name=primitive,
+                            params_dict=primitive_params,
+                            progress_timeout=self.timeout_progress_primitive,
+                            total_timeout=self.timeout_primitive,
+                            db_dict=db_dict,
+                            vca_id=vca_id,
+                            vca_type=vca_type,
+                        ),
+                        timeout=timeout or self.timeout_primitive,
+                    )
+                    # execution was OK
+                    break
+                except asyncio.CancelledError:
+                    raise
+                except Exception as e:  # asyncio.TimeoutError
+                    if isinstance(e, asyncio.TimeoutError):
+                        e = "Timeout"
+                    retries -= 1
+                    if retries >= 0:
+                        self.logger.debug(
+                            "Error executing action {} on {} -> {}".format(
+                                primitive, ee_id, e
+                            )
+                        )
+                        # wait and retry
+                        await asyncio.sleep(retries_interval, loop=self.loop)
+                    else:
+                        return "FAILED", str(e)
+
+            return "COMPLETED", output
+
+        except (LcmException, asyncio.CancelledError):
+            raise
+        except Exception as e:
+            return "FAIL", "Error executing action {}: {}".format(primitive, e)
+
+    async def vca_status_refresh(self, nsr_id, nslcmop_id):
+        """
+        Updating the vca_status with latest juju information in nsrs record
+        :param: nsr_id: Id of the nsr
+        :param: nslcmop_id: Id of the nslcmop
+        :return: None
+        """
+
+        self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
+        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+        vca_id = self.get_vca_id({}, db_nsr)
+        # NOTE(review): assumes _admin.deployed.K8s and .VCA keys exist in the
+        # record; a nsr without them would raise KeyError — verify with schema
+        if db_nsr["_admin"]["deployed"]["K8s"]:
+            for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
+                cluster_uuid, kdu_instance, cluster_type = (
+                    k8s["k8scluster-uuid"],
+                    k8s["kdu-instance"],
+                    k8s["k8scluster-type"],
+                )
+                await self._on_update_k8s_db(
+                    cluster_uuid=cluster_uuid,
+                    kdu_instance=kdu_instance,
+                    filter={"_id": nsr_id},
+                    vca_id=vca_id,
+                    cluster_type=cluster_type,
+                )
+        else:
+            for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
+                table, filter = "nsrs", {"_id": nsr_id}
+                path = "_admin.deployed.VCA.{}.".format(vca_index)
+                await self._on_update_n2vc_db(table, filter, path, {})
+
+        self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
+        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
+
+ async def action(self, nsr_id, nslcmop_id):
+ # Try to lock HA task here
+ task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
+ if not task_is_locked_by_me:
+ return
+
+ logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
+ self.logger.debug(logging_text + "Enter")
+ # get all needed from database
+ db_nsr = None
+ db_nslcmop = None
+ db_nsr_update = {}
+ db_nslcmop_update = {}
+ nslcmop_operation_state = None
+ error_description_nslcmop = None
+ exc = None
+ try:
+ # wait for any previous tasks in process
+ step = "Waiting for previous operations to terminate"
+ await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
+
+ self._write_ns_status(
+ nsr_id=nsr_id,
+ ns_state=None,
+ current_operation="RUNNING ACTION",
+ current_operation_id=nslcmop_id,
+ )
+
+ step = "Getting information from database"
+ db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ if db_nslcmop["operationParams"].get("primitive_params"):
+ db_nslcmop["operationParams"]["primitive_params"] = json.loads(
+ db_nslcmop["operationParams"]["primitive_params"]
+ )
+
+ nsr_deployed = db_nsr["_admin"].get("deployed")
+ vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
+ vdu_id = db_nslcmop["operationParams"].get("vdu_id")
+ kdu_name = db_nslcmop["operationParams"].get("kdu_name")
+ vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
+ primitive = db_nslcmop["operationParams"]["primitive"]
+ primitive_params = db_nslcmop["operationParams"]["primitive_params"]
+ timeout_ns_action = db_nslcmop["operationParams"].get(
+ "timeout_ns_action", self.timeout_primitive
+ )
+
+ if vnf_index:
+ step = "Getting vnfr from database"
+ db_vnfr = self.db.get_one(
+ "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
+ )
+ if db_vnfr.get("kdur"):
+ kdur_list = []
+ for kdur in db_vnfr["kdur"]:
+ if kdur.get("additionalParams"):
+ kdur["additionalParams"] = json.loads(
+ kdur["additionalParams"]
+ )
+ kdur_list.append(kdur)
+ db_vnfr["kdur"] = kdur_list
+ step = "Getting vnfd from database"
+ db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
+
+ # Sync filesystem before running a primitive
+ self.fs.sync(db_vnfr["vnfd-id"])
+ else:
+ step = "Getting nsd from database"
+ db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
+
+ vca_id = self.get_vca_id(db_vnfr, db_nsr)
+ # for backward compatibility
+ if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
+ nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
+ db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+
+ # look for primitive
+ config_primitive_desc = descriptor_configuration = None
+ if vdu_id:
+ descriptor_configuration = get_configuration(db_vnfd, vdu_id)
+ elif kdu_name:
+ descriptor_configuration = get_configuration(db_vnfd, kdu_name)
+ elif vnf_index:
+ descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
+ else:
+ descriptor_configuration = db_nsd.get("ns-configuration")
+
+ if descriptor_configuration and descriptor_configuration.get(
+ "config-primitive"
+ ):
+ for config_primitive in descriptor_configuration["config-primitive"]:
+ if config_primitive["name"] == primitive:
+ config_primitive_desc = config_primitive
+ break
+
+ if not config_primitive_desc:
+ if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
+ raise LcmException(
+ "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
+ primitive
+ )
+ )
+ primitive_name = primitive
+ ee_descriptor_id = None
+ else:
+ primitive_name = config_primitive_desc.get(
+ "execution-environment-primitive", primitive
+ )
+ ee_descriptor_id = config_primitive_desc.get(
+ "execution-environment-ref"
+ )
+
+ if vnf_index:
+ if vdu_id:
+ vdur = next(
+ (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
+ )
+ desc_params = parse_yaml_strings(vdur.get("additionalParams"))
+ elif kdu_name:
+ kdur = next(
+ (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
+ )
+ desc_params = parse_yaml_strings(kdur.get("additionalParams"))
+ else:
+ desc_params = parse_yaml_strings(
+ db_vnfr.get("additionalParamsForVnf")
+ )
+ else:
+ desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
+ if kdu_name and get_configuration(db_vnfd, kdu_name):
+ kdu_configuration = get_configuration(db_vnfd, kdu_name)
+ actions = set()
+ for primitive in kdu_configuration.get("initial-config-primitive", []):
+ actions.add(primitive["name"])
+ for primitive in kdu_configuration.get("config-primitive", []):
+ actions.add(primitive["name"])
+ kdu = find_in_list(
+ nsr_deployed["K8s"],
+ lambda kdu: kdu_name == kdu["kdu-name"]
+ and kdu["member-vnf-index"] == vnf_index,
+ )
+ kdu_action = (
+ True
+ if primitive_name in actions
+ and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
+ else False
+ )
+
+ # TODO check if ns is in a proper status
+ if kdu_name and (
+ primitive_name in ("upgrade", "rollback", "status") or kdu_action
+ ):
+ # kdur and desc_params already set from before
+ if primitive_params:
+ desc_params.update(primitive_params)
+ # TODO Check if we will need something at vnf level
+ for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
+ if (
+ kdu_name == kdu["kdu-name"]
+ and kdu["member-vnf-index"] == vnf_index
+ ):
+ break
+ else:
+ raise LcmException(
+ "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
+ )
+
+ if kdu.get("k8scluster-type") not in self.k8scluster_map:
+ msg = "unknown k8scluster-type '{}'".format(
+ kdu.get("k8scluster-type")
+ )
+ raise LcmException(msg)
+
+ db_dict = {
+ "collection": "nsrs",
+ "filter": {"_id": nsr_id},
+ "path": "_admin.deployed.K8s.{}".format(index),
+ }
+ self.logger.debug(
+ logging_text
+ + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
+ )
+ step = "Executing kdu {}".format(primitive_name)
+ if primitive_name == "upgrade":
+ if desc_params.get("kdu_model"):
+ kdu_model = desc_params.get("kdu_model")
+ del desc_params["kdu_model"]
+ else:
+ kdu_model = kdu.get("kdu-model")
+ parts = kdu_model.split(sep=":")
+ if len(parts) == 2:
+ kdu_model = parts[0]
+
+ detailed_status = await asyncio.wait_for(
+ self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
+ cluster_uuid=kdu.get("k8scluster-uuid"),
+ kdu_instance=kdu.get("kdu-instance"),
+ atomic=True,
+ kdu_model=kdu_model,
+ params=desc_params,
+ db_dict=db_dict,
+ timeout=timeout_ns_action,
+ ),
+ timeout=timeout_ns_action + 10,
+ )
+ self.logger.debug(
+ logging_text + " Upgrade of kdu {} done".format(detailed_status)
+ )
+ elif primitive_name == "rollback":
+ detailed_status = await asyncio.wait_for(
+ self.k8scluster_map[kdu["k8scluster-type"]].rollback(
+ cluster_uuid=kdu.get("k8scluster-uuid"),
+ kdu_instance=kdu.get("kdu-instance"),
+ db_dict=db_dict,
+ ),
+ timeout=timeout_ns_action,
+ )
+ elif primitive_name == "status":
+ detailed_status = await asyncio.wait_for(
+ self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
+ cluster_uuid=kdu.get("k8scluster-uuid"),
+ kdu_instance=kdu.get("kdu-instance"),
+ vca_id=vca_id,
+ ),
+ timeout=timeout_ns_action,
+ )
+ else:
+ kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
+ kdu["kdu-name"], nsr_id
+ )
+ params = self._map_primitive_params(
+ config_primitive_desc, primitive_params, desc_params
+ )
+
+ detailed_status = await asyncio.wait_for(
+ self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
+ cluster_uuid=kdu.get("k8scluster-uuid"),
+ kdu_instance=kdu_instance,
+ primitive_name=primitive_name,
+ params=params,
+ db_dict=db_dict,
+ timeout=timeout_ns_action,
+ vca_id=vca_id,
+ ),
+ timeout=timeout_ns_action,
+ )
+
+ if detailed_status:
+ nslcmop_operation_state = "COMPLETED"
+ else:
+ detailed_status = ""
+ nslcmop_operation_state = "FAILED"
+ else:
+ ee_id, vca_type = self._look_for_deployed_vca(
+ nsr_deployed["VCA"],
+ member_vnf_index=vnf_index,
+ vdu_id=vdu_id,
+ vdu_count_index=vdu_count_index,
+ ee_descriptor_id=ee_descriptor_id,
+ )
+ for vca_index, vca_deployed in enumerate(
+ db_nsr["_admin"]["deployed"]["VCA"]
+ ):
+ if vca_deployed.get("member-vnf-index") == vnf_index:
+ db_dict = {
+ "collection": "nsrs",
+ "filter": {"_id": nsr_id},
+ "path": "_admin.deployed.VCA.{}.".format(vca_index),
+ }
+ break
+ (
+ nslcmop_operation_state,
+ detailed_status,
+ ) = await self._ns_execute_primitive(
+ ee_id,
+ primitive=primitive_name,
+ primitive_params=self._map_primitive_params(
+ config_primitive_desc, primitive_params, desc_params
+ ),
+ timeout=timeout_ns_action,
+ vca_type=vca_type,
+ db_dict=db_dict,
+ vca_id=vca_id,
+ )
+
+ db_nslcmop_update["detailed-status"] = detailed_status
+ error_description_nslcmop = (
+ detailed_status if nslcmop_operation_state == "FAILED" else ""
+ )
+ self.logger.debug(
+ logging_text
+ + " task Done with result {} {}".format(
+ nslcmop_operation_state, detailed_status
+ )
+ )
+ return # database update is called inside finally
+
+ except (DbException, LcmException, N2VCException, K8sException) as e:
+ self.logger.error(logging_text + "Exit Exception {}".format(e))
+ exc = e
+ except asyncio.CancelledError:
+ self.logger.error(
+ logging_text + "Cancelled Exception while '{}'".format(step)
+ )
+ exc = "Operation was cancelled"
+ except asyncio.TimeoutError:
+ self.logger.error(logging_text + "Timeout while '{}'".format(step))
+ exc = "Timeout"
+ except Exception as e:
+ exc = traceback.format_exc()
+ self.logger.critical(
+ logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
+ exc_info=True,
+ )
+ finally:
+ if exc:
+ db_nslcmop_update[
+ "detailed-status"
+ ] = (
+ detailed_status
+ ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
+ nslcmop_operation_state = "FAILED"
+ if db_nsr:
+ self._write_ns_status(
+ nsr_id=nsr_id,
+ ns_state=db_nsr[
+ "nsState"
+ ], # TODO check if degraded. For the moment use previous status
+ current_operation="IDLE",
+ current_operation_id=None,
+ # error_description=error_description_nsr,
+ # error_detail=error_detail,
+ other_update=db_nsr_update,
+ )
+
+ self._write_op_status(
+ op_id=nslcmop_id,
+ stage="",
+ error_message=error_description_nslcmop,
+ operation_state=nslcmop_operation_state,
+ other_update=db_nslcmop_update,
+ )
+
+ if nslcmop_operation_state:
+ try:
+ await self.msg.aiowrite(
+ "ns",
+ "actioned",
+ {
+ "nsr_id": nsr_id,
+ "nslcmop_id": nslcmop_id,
+ "operationState": nslcmop_operation_state,
+ },
+ loop=self.loop,
+ )
+ except Exception as e:
+ self.logger.error(
+ logging_text + "kafka_write notification Exception {}".format(e)
+ )
+ self.logger.debug(logging_text + "Exit")
+ self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
+ return nslcmop_operation_state, detailed_status
+
+ async def terminate_vdus(
+ self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
+ ):
+ """This method terminates VDUs
+
+ Args:
+ db_vnfr: VNF instance record
+ member_vnf_index: VNF index to identify the VDUs to be removed
+ db_nsr: NS instance record
+ update_db_nslcmops: Nslcmop update record
+ """
+ vca_scaling_info = []
+ scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
+ scaling_info["scaling_direction"] = "IN"
+ scaling_info["vdu-delete"] = {}
+ scaling_info["kdu-delete"] = {}
+ db_vdur = db_vnfr.get("vdur")
+ vdur_list = copy(db_vdur)
+ count_index = 0
+ for index, vdu in enumerate(vdur_list):
+ vca_scaling_info.append(
+ {
+ "osm_vdu_id": vdu["vdu-id-ref"],
+ "member-vnf-index": member_vnf_index,
+ "type": "delete",
+ "vdu_index": count_index,
+ })
+ scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
+ scaling_info["vdu"].append(
+ {
+ "name": vdu.get("name") or vdu.get("vdu-name"),
+ "vdu_id": vdu["vdu-id-ref"],
+ "interface": [],
+ })
+ for interface in vdu["interfaces"]:
+ scaling_info["vdu"][index]["interface"].append(
+ {
+ "name": interface["name"],
+ "ip_address": interface["ip-address"],
+ "mac_address": interface.get("mac-address"),
+ })
+ self.logger.info("NS update scaling info{}".format(scaling_info))
+ stage[2] = "Terminating VDUs"
+ if scaling_info.get("vdu-delete"):
+ # scale_process = "RO"
+ if self.ro_config.get("ng"):
+ await self._scale_ng_ro(
+ logging_text, db_nsr, update_db_nslcmops, db_vnfr, scaling_info, stage
+ )
+
+ async def remove_vnf(
+ self, nsr_id, nslcmop_id, vnf_instance_id
+ ):
+ """This method is to Remove VNF instances from NS.
+
+ Args:
+ nsr_id: NS instance id
+ nslcmop_id: nslcmop id of update
+ vnf_instance_id: id of the VNF instance to be removed
+
+ Returns:
+ result: (str, str) COMPLETED/FAILED, details
+ """
+ try:
+ db_nsr_update = {}
+ logging_text = "Task ns={} update ".format(nsr_id)
+ check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
+ self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
+ if check_vnfr_count > 1:
+ stage = ["", "", ""]
+ step = "Getting nslcmop from database"
+ self.logger.debug(step + " after having waited for previous tasks to be completed")
+ # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
+ member_vnf_index = db_vnfr["member-vnf-index-ref"]
+ """ db_vnfr = self.db.get_one(
+ "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
+
+ update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+ await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
+
+ constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
+ constituent_vnfr.remove(db_vnfr.get("_id"))
+ db_nsr_update["constituent-vnfr-ref"] = db_nsr.get("constituent-vnfr-ref")
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ return "COMPLETED", "Done"
+ else:
+ step = "Terminate VNF Failed with"
+ raise LcmException("{} Cannot terminate the last VNF in this NS.".format(
+ vnf_instance_id))
+ except (LcmException, asyncio.CancelledError):
+ raise
+ except Exception as e:
+ self.logger.debug("Error removing VNF {}".format(e))
+ return "FAILED", "Error removing VNF {}".format(e)
+
+ async def _ns_redeploy_vnf(
+ self, nsr_id, nslcmop_id, db_vnfd, db_vnfr, db_nsr,
+ ):
+ """This method updates and redeploys VNF instances
+
+ Args:
+ nsr_id: NS instance id
+ nslcmop_id: nslcmop id
+ db_vnfd: VNF descriptor
+ db_vnfr: VNF instance record
+ db_nsr: NS instance record
+
+ Returns:
+ result: (str, str) COMPLETED/FAILED, details
+ """
+ try:
+ count_index = 0
+ stage = ["", "", ""]
+ logging_text = "Task ns={} update ".format(nsr_id)
+ latest_vnfd_revision = db_vnfd["_admin"].get("revision")
+ member_vnf_index = db_vnfr["member-vnf-index-ref"]
+
+ # Terminate old VNF resources
+ update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+ await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
+
+ # old_vnfd_id = db_vnfr["vnfd-id"]
+ # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+ new_db_vnfd = db_vnfd
+ # new_vnfd_ref = new_db_vnfd["id"]
+ # new_vnfd_id = vnfd_id
+
+ # Create VDUR
+ new_vnfr_cp = []
+ for cp in new_db_vnfd.get("ext-cpd", ()):
+ vnf_cp = {
+ "name": cp.get("id"),
+ "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
+ "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
+ "id": cp.get("id"),
+ }
+ new_vnfr_cp.append(vnf_cp)
+ new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
+ # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
+ # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
+ new_vnfr_update = {"revision": latest_vnfd_revision, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
+ self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
+ updated_db_vnfr = self.db.get_one(
+ "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}
+ )
+
+ # Instantiate new VNF resources
+ # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+ vca_scaling_info = []
+ scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
+ scaling_info["scaling_direction"] = "OUT"
+ scaling_info["vdu-create"] = {}
+ scaling_info["kdu-create"] = {}
+ vdud_instantiate_list = db_vnfd["vdu"]
+ for index, vdud in enumerate(vdud_instantiate_list):
+ cloud_init_text = self._get_vdu_cloud_init_content(
+ vdud, db_vnfd
+ )
+ if cloud_init_text:
+ additional_params = (
+ self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
+ or {}
+ )
+ cloud_init_list = []
+ if cloud_init_text:
+ # TODO Information of its own ip is not available because db_vnfr is not updated.
+ additional_params["OSM"] = get_osm_params(
+ updated_db_vnfr, vdud["id"], 1
+ )
+ cloud_init_list.append(
+ self._parse_cloud_init(
+ cloud_init_text,
+ additional_params,
+ db_vnfd["id"],
+ vdud["id"],
+ )
+ )
+ vca_scaling_info.append(
+ {
+ "osm_vdu_id": vdud["id"],
+ "member-vnf-index": member_vnf_index,
+ "type": "create",
+ "vdu_index": count_index,
+ }
+ )
+ scaling_info["vdu-create"][vdud["id"]] = count_index
+ if self.ro_config.get("ng"):
+ self.logger.debug(
+ "New Resources to be deployed: {}".format(scaling_info))
+ await self._scale_ng_ro(
+ logging_text, db_nsr, update_db_nslcmops, updated_db_vnfr, scaling_info, stage
+ )
+ return "COMPLETED", "Done"
+ except (LcmException, asyncio.CancelledError):
+ raise
+ except Exception as e:
+ self.logger.debug("Error updating VNF {}".format(e))
+ return "FAILED", "Error updating VNF {}".format(e)
+
+ async def _ns_charm_upgrade(
+ self,
+ ee_id,
+ charm_id,
+ charm_type,
+ path,
+ timeout: float = None,
+ ) -> (str, str):
+ """This method upgrade charms in VNF instances
+
+ Args:
+ ee_id: Execution environment id
+ path: Local path to the charm
+ charm_id: charm-id
+ charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
+ timeout: (Float) Timeout for the ns update operation
+
+ Returns:
+ result: (str, str) COMPLETED/FAILED, details
+ """
+ try:
+ charm_type = charm_type or "lxc_proxy_charm"
+ output = await self.vca_map[charm_type].upgrade_charm(
+ ee_id=ee_id,
+ path=path,
+ charm_id=charm_id,
+ charm_type=charm_type,
+ timeout=timeout or self.timeout_ns_update,
+ )
+
+ if output:
+ return "COMPLETED", output
+
+ except (LcmException, asyncio.CancelledError):
+ raise
+
+ except Exception as e:
+
+ self.logger.debug("Error upgrading charm {}".format(path))
+
+ return "FAILED", "Error upgrading charm {}: {}".format(path, e)
+
+ async def update(self, nsr_id, nslcmop_id):
+ """Update NS according to different update types
+
+ This method performs upgrade of VNF instances then updates the revision
+ number in VNF record
+
+ Args:
+ nsr_id: Network service will be updated
+ nslcmop_id: ns lcm operation id
+
+ Returns:
+ It may raise DbException, LcmException, N2VCException, K8sException
+
+ """
+ # Try to lock HA task here
+ task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
+ if not task_is_locked_by_me:
+ return
+
+ logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
+ self.logger.debug(logging_text + "Enter")
+
+ # Set the required variables to be filled up later
+ db_nsr = None
+ db_nslcmop_update = {}
+ vnfr_update = {}
+ nslcmop_operation_state = None
+ db_nsr_update = {}
+ error_description_nslcmop = ""
+ exc = None
+ change_type = "updated"
+ detailed_status = ""
+
+ try:
+ # wait for any previous tasks in process
+ step = "Waiting for previous operations to terminate"
+ await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
+ self._write_ns_status(
+ nsr_id=nsr_id,
+ ns_state=None,
+ current_operation="UPDATING",
+ current_operation_id=nslcmop_id,
+ )
+
+ step = "Getting nslcmop from database"
+ db_nslcmop = self.db.get_one(
+ "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
+ )
+ update_type = db_nslcmop["operationParams"]["updateType"]
+
+ step = "Getting nsr from database"
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ old_operational_status = db_nsr["operational-status"]
+ db_nsr_update["operational-status"] = "updating"
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ nsr_deployed = db_nsr["_admin"].get("deployed")
+
+ if update_type == "CHANGE_VNFPKG":
+
+ # Get the input parameters given through update request
+ vnf_instance_id = db_nslcmop["operationParams"][
+ "changeVnfPackageData"
+ ].get("vnfInstanceId")
+
+ vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
+ "vnfdId"
+ )
+ timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
+
+ step = "Getting vnfr from database"
+ db_vnfr = self.db.get_one(
+ "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
+ )
+
+ step = "Getting vnfds from database"
+ # Latest VNFD
+ latest_vnfd = self.db.get_one(
+ "vnfds", {"_id": vnfd_id}, fail_on_empty=False
+ )
+ latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
+
+ # Current VNFD
+ current_vnf_revision = db_vnfr.get("revision", 1)
+ current_vnfd = self.db.get_one(
+ "vnfds_revisions",
+ {"_id": vnfd_id + ":" + str(current_vnf_revision)},
+ fail_on_empty=False,
+ )
+ # Charm artifact paths will be filled up later
+ (
+ current_charm_artifact_path,
+ target_charm_artifact_path,
+ charm_artifact_paths,
+ ) = ([], [], [])
+
+ step = "Checking if revision has changed in VNFD"
+ if current_vnf_revision != latest_vnfd_revision:
+
+ change_type = "policy_updated"
+
+ # There is new revision of VNFD, update operation is required
+ current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
+ latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
+
+ step = "Removing the VNFD packages if they exist in the local path"
+ shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
+ shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
+
+ step = "Get the VNFD packages from FSMongo"
+ self.fs.sync(from_path=latest_vnfd_path)
+ self.fs.sync(from_path=current_vnfd_path)
+
+ step = (
+ "Get the charm-type, charm-id, ee-id if there is deployed VCA"
+ )
+ base_folder = latest_vnfd["_admin"]["storage"]
+
+ for charm_index, charm_deployed in enumerate(
+ get_iterable(nsr_deployed, "VCA")
+ ):
+ vnf_index = db_vnfr.get("member-vnf-index-ref")
+
+ # Getting charm-id and charm-type
+ if charm_deployed.get("member-vnf-index") == vnf_index:
+ charm_id = self.get_vca_id(db_vnfr, db_nsr)
+ charm_type = charm_deployed.get("type")
+
+ # Getting ee-id
+ ee_id = charm_deployed.get("ee_id")
+
+ step = "Getting descriptor config"
+ descriptor_config = get_configuration(
+ current_vnfd, current_vnfd["id"]
+ )
+
+ if "execution-environment-list" in descriptor_config:
+ ee_list = descriptor_config.get(
+ "execution-environment-list", []
+ )
+ else:
+ ee_list = []
+
+ # There could be several charm used in the same VNF
+ for ee_item in ee_list:
+ if ee_item.get("juju"):
+
+ step = "Getting charm name"
+ charm_name = ee_item["juju"].get("charm")
+
+ step = "Setting Charm artifact paths"
+ current_charm_artifact_path.append(
+ get_charm_artifact_path(
+ base_folder,
+ charm_name,
+ charm_type,
+ current_vnf_revision,
+ )
+ )
+ target_charm_artifact_path.append(
+ get_charm_artifact_path(
+ base_folder,
+ charm_name,
+ charm_type,
+ latest_vnfd_revision,
+ )
+ )
+
+ charm_artifact_paths = zip(
+ current_charm_artifact_path, target_charm_artifact_path
+ )
+
+ step = "Checking if software version has changed in VNFD"
+ if find_software_version(current_vnfd) != find_software_version(
+ latest_vnfd
+ ):
+
+ step = "Checking if existing VNF has charm"
+ for current_charm_path, target_charm_path in list(
+ charm_artifact_paths
+ ):
+ if current_charm_path:
+ raise LcmException(
+ "Software version change is not supported as VNF instance {} has charm.".format(
+ vnf_instance_id
+ )
+ )
+
+ # There is no change in the charm package, then redeploy the VNF
+ # based on new descriptor
+ step = "Redeploying VNF"
+ member_vnf_index = db_vnfr["member-vnf-index-ref"]
+ (
+ result,
+ detailed_status
+ ) = await self._ns_redeploy_vnf(
+ nsr_id,
+ nslcmop_id,
+ latest_vnfd,
+ db_vnfr,
+ db_nsr
+ )
+ if result == "FAILED":
+ nslcmop_operation_state = result
+ error_description_nslcmop = detailed_status
+ db_nslcmop_update["detailed-status"] = detailed_status
+ self.logger.debug(
+ logging_text
+ + " step {} Done with result {} {}".format(
+ step, nslcmop_operation_state, detailed_status
+ )
+ )
+
+ else:
+ step = "Checking if any charm package has changed or not"
+ for current_charm_path, target_charm_path in list(
+ charm_artifact_paths
+ ):
+ if (
+ current_charm_path
+ and target_charm_path
+ and self.check_charm_hash_changed(
+ current_charm_path, target_charm_path
+ )
+ ):
+
+ step = "Checking whether VNF uses juju bundle"
+ if check_juju_bundle_existence(current_vnfd):
+
+ raise LcmException(
+ "Charm upgrade is not supported for the instance which"
+ " uses juju-bundle: {}".format(
+ check_juju_bundle_existence(current_vnfd)
+ )
+ )
+
+ step = "Upgrading Charm"
+ (
+ result,
+ detailed_status,
+ ) = await self._ns_charm_upgrade(
+ ee_id=ee_id,
+ charm_id=charm_id,
+ charm_type=charm_type,
+ path=self.fs.path + target_charm_path,
+ timeout=timeout_seconds,
+ )
+
+ if result == "FAILED":
+ nslcmop_operation_state = result
+ error_description_nslcmop = detailed_status
+
+ db_nslcmop_update["detailed-status"] = detailed_status
+ self.logger.debug(
+ logging_text
+ + " step {} Done with result {} {}".format(
+ step, nslcmop_operation_state, detailed_status
+ )
+ )
+
+ step = "Updating policies"
+ member_vnf_index = db_vnfr["member-vnf-index-ref"]
+ result = "COMPLETED"
+ detailed_status = "Done"
+ db_nslcmop_update["detailed-status"] = "Done"
+
+ # If nslcmop_operation_state is None, so any operation is not failed.
+ if not nslcmop_operation_state:
+ nslcmop_operation_state = "COMPLETED"
+
+ # If update CHANGE_VNFPKG nslcmop_operation is successful
+ # vnf revision need to be updated
+ vnfr_update["revision"] = latest_vnfd_revision
+ self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
+
+ self.logger.debug(
+ logging_text
+ + " task Done with result {} {}".format(
+ nslcmop_operation_state, detailed_status
+ )
+ )
+ elif update_type == "REMOVE_VNF":
+ # This part is included in https://osm.etsi.org/gerrit/11876
+ vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
+ member_vnf_index = db_vnfr["member-vnf-index-ref"]
+ step = "Removing VNF"
+ (result, detailed_status) = await self.remove_vnf(nsr_id, nslcmop_id, vnf_instance_id)
+ if result == "FAILED":
+ nslcmop_operation_state = result
+ error_description_nslcmop = detailed_status
+ db_nslcmop_update["detailed-status"] = detailed_status
+ change_type = "vnf_terminated"
+ if not nslcmop_operation_state:
+ nslcmop_operation_state = "COMPLETED"
+ self.logger.debug(
+ logging_text
+ + " task Done with result {} {}".format(
+ nslcmop_operation_state, detailed_status
+ )
+ )
+
+ elif update_type == "OPERATE_VNF":
+ vnf_id = db_nslcmop["operationParams"]["operateVnfData"]["vnfInstanceId"]
+ operation_type = db_nslcmop["operationParams"]["operateVnfData"]["changeStateTo"]
+ additional_param = db_nslcmop["operationParams"]["operateVnfData"]["additionalParam"]
+ (result, detailed_status) = await self.rebuild_start_stop(
+ nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
+ )
+ if result == "FAILED":
+ nslcmop_operation_state = result
+ error_description_nslcmop = detailed_status
+ db_nslcmop_update["detailed-status"] = detailed_status
+ if not nslcmop_operation_state:
+ nslcmop_operation_state = "COMPLETED"
+ self.logger.debug(
+ logging_text
+ + " task Done with result {} {}".format(
+ nslcmop_operation_state, detailed_status
+ )
+ )
+
+ # If nslcmop_operation_state is None, so any operation is not failed.
+ # All operations are executed in overall.
+ if not nslcmop_operation_state:
+ nslcmop_operation_state = "COMPLETED"
+ db_nsr_update["operational-status"] = old_operational_status
+
+ except (DbException, LcmException, N2VCException, K8sException) as e:
+ self.logger.error(logging_text + "Exit Exception {}".format(e))
+ exc = e
+ except asyncio.CancelledError:
+ self.logger.error(
+ logging_text + "Cancelled Exception while '{}'".format(step)
+ )
+ exc = "Operation was cancelled"
+ except asyncio.TimeoutError:
+ self.logger.error(logging_text + "Timeout while '{}'".format(step))
+ exc = "Timeout"
+ except Exception as e:
+ exc = traceback.format_exc()
+ self.logger.critical(
+ logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
+ exc_info=True,
+ )
+ finally:
+ if exc:
+ db_nslcmop_update[
+ "detailed-status"
+ ] = (
+ detailed_status
+ ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
+ nslcmop_operation_state = "FAILED"
+ db_nsr_update["operational-status"] = old_operational_status
+ if db_nsr:
+ self._write_ns_status(
+ nsr_id=nsr_id,
+ ns_state=db_nsr["nsState"],
+ current_operation="IDLE",
+ current_operation_id=None,
+ other_update=db_nsr_update,
+ )
+
+ self._write_op_status(
+ op_id=nslcmop_id,
+ stage="",
+ error_message=error_description_nslcmop,
+ operation_state=nslcmop_operation_state,
+ other_update=db_nslcmop_update,
+ )
+
+ if nslcmop_operation_state:
+ try:
+ msg = {
+ "nsr_id": nsr_id,
+ "nslcmop_id": nslcmop_id,
+ "operationState": nslcmop_operation_state,
+ }
+ if change_type in ("vnf_terminated", "policy_updated"):
+ msg.update({"vnf_member_index": member_vnf_index})
+ await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
+ except Exception as e:
+ self.logger.error(
+ logging_text + "kafka_write notification Exception {}".format(e)
+ )
+ self.logger.debug(logging_text + "Exit")
+ self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
+ return nslcmop_operation_state, detailed_status
+
+ async def scale(self, nsr_id, nslcmop_id):
+ # Try to lock HA task here
+ task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
+ if not task_is_locked_by_me:
+ return
+
+ logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
+ stage = ["", "", ""]
+ tasks_dict_info = {}
+ # ^ stage, step, VIM progress
+ self.logger.debug(logging_text + "Enter")
+ # get all needed from database
+ db_nsr = None
+ db_nslcmop_update = {}
+ db_nsr_update = {}
+ exc = None
+ # in case of error, indicates what part of scale was failed to put nsr at error status
+ scale_process = None
+ old_operational_status = ""
+ old_config_status = ""
+ nsi_id = None
+ try:
+ # wait for any previous tasks in process
+ step = "Waiting for previous operations to terminate"
+ await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
+ self._write_ns_status(
+ nsr_id=nsr_id,
+ ns_state=None,
+ current_operation="SCALING",
+ current_operation_id=nslcmop_id,
+ )
+
+ step = "Getting nslcmop from database"
+ self.logger.debug(
+ step + " after having waited for previous tasks to be completed"
+ )
+ db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+
+ step = "Getting nsr from database"
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ old_operational_status = db_nsr["operational-status"]
+ old_config_status = db_nsr["config-status"]
+
+ step = "Parsing scaling parameters"
+ db_nsr_update["operational-status"] = "scaling"
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ nsr_deployed = db_nsr["_admin"].get("deployed")
+
+ vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
+ "scaleByStepData"
+ ]["member-vnf-index"]
+ scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
+ "scaleByStepData"
+ ]["scaling-group-descriptor"]
+ scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
+ # for backward compatibility
+ if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
+ nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
+ db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+
+ step = "Getting vnfr from database"
+ db_vnfr = self.db.get_one(
+ "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
+ )
+
+ vca_id = self.get_vca_id(db_vnfr, db_nsr)
+
+ step = "Getting vnfd from database"
+ db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
+
+ base_folder = db_vnfd["_admin"]["storage"]
+
+ step = "Getting scaling-group-descriptor"
+ scaling_descriptor = find_in_list(
+ get_scaling_aspect(db_vnfd),
+ lambda scale_desc: scale_desc["name"] == scaling_group,
+ )
+ if not scaling_descriptor:
+ raise LcmException(
+ "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
+ "at vnfd:scaling-group-descriptor".format(scaling_group)
+ )
+
+ step = "Sending scale order to VIM"
+ # TODO check if ns is in a proper status
+ nb_scale_op = 0
+ if not db_nsr["_admin"].get("scaling-group"):
+ self.update_db_2(
+ "nsrs",
+ nsr_id,
+ {
+ "_admin.scaling-group": [
+ {"name": scaling_group, "nb-scale-op": 0}
+ ]
+ },
+ )
+ admin_scale_index = 0
+ else:
+ for admin_scale_index, admin_scale_info in enumerate(
+ db_nsr["_admin"]["scaling-group"]
+ ):
+ if admin_scale_info["name"] == scaling_group:
+ nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
+ break
+ else: # not found, set index one plus last element and add new entry with the name
+ admin_scale_index += 1
+ db_nsr_update[
+ "_admin.scaling-group.{}.name".format(admin_scale_index)
+ ] = scaling_group
+
+ vca_scaling_info = []
+ scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
+ if scaling_type == "SCALE_OUT":
+ if "aspect-delta-details" not in scaling_descriptor:
+ raise LcmException(
+ "Aspect delta details not fount in scaling descriptor {}".format(
+ scaling_descriptor["name"]
+ )
+ )
+ # count if max-instance-count is reached
+ deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
+
+ scaling_info["scaling_direction"] = "OUT"
+ scaling_info["vdu-create"] = {}
+ scaling_info["kdu-create"] = {}
+ for delta in deltas:
+ for vdu_delta in delta.get("vdu-delta", {}):
+ vdud = get_vdu(db_vnfd, vdu_delta["id"])
+ # vdu_index also provides the number of instance of the targeted vdu
+ vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
+ cloud_init_text = self._get_vdu_cloud_init_content(
+ vdud, db_vnfd
+ )
+ if cloud_init_text:
+ additional_params = (
+ self._get_vdu_additional_params(db_vnfr, vdud["id"])
+ or {}
+ )
+ cloud_init_list = []
+
+ vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
+ max_instance_count = 10
+ if vdu_profile and "max-number-of-instances" in vdu_profile:
+ max_instance_count = vdu_profile.get(
+ "max-number-of-instances", 10
+ )
+
+ default_instance_num = get_number_of_instances(
+ db_vnfd, vdud["id"]
+ )
+ instances_number = vdu_delta.get("number-of-instances", 1)
+ nb_scale_op += instances_number
+
+ new_instance_count = nb_scale_op + default_instance_num
+ # Control if new count is over max and vdu count is less than max.
+ # Then assign new instance count
+ if new_instance_count > max_instance_count > vdu_count:
+ instances_number = new_instance_count - max_instance_count
+ else:
+ instances_number = instances_number
+
+ if new_instance_count > max_instance_count:
+ raise LcmException(
+ "reached the limit of {} (max-instance-count) "
+ "scaling-out operations for the "
+ "scaling-group-descriptor '{}'".format(
+ nb_scale_op, scaling_group
+ )
+ )
+ for x in range(vdu_delta.get("number-of-instances", 1)):
+ if cloud_init_text:
+ # TODO Information of its own ip is not available because db_vnfr is not updated.
+ additional_params["OSM"] = get_osm_params(
+ db_vnfr, vdu_delta["id"], vdu_index + x
+ )
+ cloud_init_list.append(
+ self._parse_cloud_init(
+ cloud_init_text,
+ additional_params,
+ db_vnfd["id"],
+ vdud["id"],
+ )
+ )
+ vca_scaling_info.append(
+ {
+ "osm_vdu_id": vdu_delta["id"],
+ "member-vnf-index": vnf_index,
+ "type": "create",
+ "vdu_index": vdu_index + x,
+ }
+ )
+ scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
+ for kdu_delta in delta.get("kdu-resource-delta", {}):
+ kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
+ kdu_name = kdu_profile["kdu-name"]
+ resource_name = kdu_profile.get("resource-name", "")
+
+ # Might have different kdus in the same delta
+ # Should have list for each kdu
+ if not scaling_info["kdu-create"].get(kdu_name, None):
+ scaling_info["kdu-create"][kdu_name] = []
+
+ kdur = get_kdur(db_vnfr, kdu_name)
+ if kdur.get("helm-chart"):
+ k8s_cluster_type = "helm-chart-v3"
+ self.logger.debug("kdur: {}".format(kdur))
+ if (
+ kdur.get("helm-version")
+ and kdur.get("helm-version") == "v2"
+ ):
+ k8s_cluster_type = "helm-chart"
+ elif kdur.get("juju-bundle"):
+ k8s_cluster_type = "juju-bundle"
+ else:
+ raise LcmException(
+ "kdu type for kdu='{}.{}' is neither helm-chart nor "
+ "juju-bundle. Maybe an old NBI version is running".format(
+ db_vnfr["member-vnf-index-ref"], kdu_name
+ )
+ )
+
+ max_instance_count = 10
+ if kdu_profile and "max-number-of-instances" in kdu_profile:
+ max_instance_count = kdu_profile.get(
+ "max-number-of-instances", 10
+ )
+
+ nb_scale_op += kdu_delta.get("number-of-instances", 1)
+ deployed_kdu, _ = get_deployed_kdu(
+ nsr_deployed, kdu_name, vnf_index
+ )
+ if deployed_kdu is None:
+ raise LcmException(
+ "KDU '{}' for vnf '{}' not deployed".format(
+ kdu_name, vnf_index
+ )
+ )
+ kdu_instance = deployed_kdu.get("kdu-instance")
+ instance_num = await self.k8scluster_map[
+ k8s_cluster_type
+ ].get_scale_count(
+ resource_name,
+ kdu_instance,
+ vca_id=vca_id,
+ cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
+ kdu_model=deployed_kdu.get("kdu-model"),
+ )
+ kdu_replica_count = instance_num + kdu_delta.get(
+ "number-of-instances", 1
+ )
+
+ # Control if new count is over max and instance_num is less than max.
+ # Then assign max instance number to kdu replica count
+ if kdu_replica_count > max_instance_count > instance_num:
+ kdu_replica_count = max_instance_count
+ if kdu_replica_count > max_instance_count:
+ raise LcmException(
+ "reached the limit of {} (max-instance-count) "
+ "scaling-out operations for the "
+ "scaling-group-descriptor '{}'".format(
+ instance_num, scaling_group
+ )
+ )
+
+ for x in range(kdu_delta.get("number-of-instances", 1)):
+ vca_scaling_info.append(
+ {
+ "osm_kdu_id": kdu_name,
+ "member-vnf-index": vnf_index,
+ "type": "create",
+ "kdu_index": instance_num + x - 1,
+ }
+ )
+ scaling_info["kdu-create"][kdu_name].append(
+ {
+ "member-vnf-index": vnf_index,
+ "type": "create",
+ "k8s-cluster-type": k8s_cluster_type,
+ "resource-name": resource_name,
+ "scale": kdu_replica_count,
+ }
+ )
+ elif scaling_type == "SCALE_IN":
+ deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
+
+ scaling_info["scaling_direction"] = "IN"
+ scaling_info["vdu-delete"] = {}
+ scaling_info["kdu-delete"] = {}
+
+ for delta in deltas:
+ for vdu_delta in delta.get("vdu-delta", {}):
+ vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
+ min_instance_count = 0
+ vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
+ if vdu_profile and "min-number-of-instances" in vdu_profile:
+ min_instance_count = vdu_profile["min-number-of-instances"]
+
+ default_instance_num = get_number_of_instances(
+ db_vnfd, vdu_delta["id"]
+ )
+ instance_num = vdu_delta.get("number-of-instances", 1)
+ nb_scale_op -= instance_num
+
+ new_instance_count = nb_scale_op + default_instance_num
+
+ if new_instance_count < min_instance_count < vdu_count:
+ instances_number = min_instance_count - new_instance_count
+ else:
+ instances_number = instance_num
+
+ if new_instance_count < min_instance_count:
+ raise LcmException(
+ "reached the limit of {} (min-instance-count) scaling-in operations for the "
+ "scaling-group-descriptor '{}'".format(
+ nb_scale_op, scaling_group
+ )
+ )
+ for x in range(vdu_delta.get("number-of-instances", 1)):
+ vca_scaling_info.append(
+ {
+ "osm_vdu_id": vdu_delta["id"],
+ "member-vnf-index": vnf_index,
+ "type": "delete",
+ "vdu_index": vdu_index - 1 - x,
+ }
+ )
+ scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
+ for kdu_delta in delta.get("kdu-resource-delta", {}):
+ kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
+ kdu_name = kdu_profile["kdu-name"]
+ resource_name = kdu_profile.get("resource-name", "")
+
+ if not scaling_info["kdu-delete"].get(kdu_name, None):
+ scaling_info["kdu-delete"][kdu_name] = []
+
+ kdur = get_kdur(db_vnfr, kdu_name)
+ if kdur.get("helm-chart"):
+ k8s_cluster_type = "helm-chart-v3"
+ self.logger.debug("kdur: {}".format(kdur))
+ if (
+ kdur.get("helm-version")
+ and kdur.get("helm-version") == "v2"
+ ):
+ k8s_cluster_type = "helm-chart"
+ elif kdur.get("juju-bundle"):
+ k8s_cluster_type = "juju-bundle"
+ else:
+ raise LcmException(
+ "kdu type for kdu='{}.{}' is neither helm-chart nor "
+ "juju-bundle. Maybe an old NBI version is running".format(
+ db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
+ )
+ )
+
+ min_instance_count = 0
+ if kdu_profile and "min-number-of-instances" in kdu_profile:
+ min_instance_count = kdu_profile["min-number-of-instances"]
+
+ nb_scale_op -= kdu_delta.get("number-of-instances", 1)
+ deployed_kdu, _ = get_deployed_kdu(
+ nsr_deployed, kdu_name, vnf_index
+ )
+ if deployed_kdu is None:
+ raise LcmException(
+ "KDU '{}' for vnf '{}' not deployed".format(
+ kdu_name, vnf_index
+ )
+ )
+ kdu_instance = deployed_kdu.get("kdu-instance")
+ instance_num = await self.k8scluster_map[
+ k8s_cluster_type
+ ].get_scale_count(
+ resource_name,
+ kdu_instance,
+ vca_id=vca_id,
+ cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
+ kdu_model=deployed_kdu.get("kdu-model"),
+ )
+ kdu_replica_count = instance_num - kdu_delta.get(
+ "number-of-instances", 1
+ )
+
+ if kdu_replica_count < min_instance_count < instance_num:
+ kdu_replica_count = min_instance_count
+ if kdu_replica_count < min_instance_count:
+ raise LcmException(
+ "reached the limit of {} (min-instance-count) scaling-in operations for the "
+ "scaling-group-descriptor '{}'".format(
+ instance_num, scaling_group
+ )
+ )
+
+ for x in range(kdu_delta.get("number-of-instances", 1)):
+ vca_scaling_info.append(
+ {
+ "osm_kdu_id": kdu_name,
+ "member-vnf-index": vnf_index,
+ "type": "delete",
+ "kdu_index": instance_num - x - 1,
+ }
+ )
+ scaling_info["kdu-delete"][kdu_name].append(
+ {
+ "member-vnf-index": vnf_index,
+ "type": "delete",
+ "k8s-cluster-type": k8s_cluster_type,
+ "resource-name": resource_name,
+ "scale": kdu_replica_count,
+ }
+ )
+
+ # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
+ vdu_delete = copy(scaling_info.get("vdu-delete"))
+ if scaling_info["scaling_direction"] == "IN":
+ for vdur in reversed(db_vnfr["vdur"]):
+ if vdu_delete.get(vdur["vdu-id-ref"]):
+ vdu_delete[vdur["vdu-id-ref"]] -= 1
+ scaling_info["vdu"].append(
+ {
+ "name": vdur.get("name") or vdur.get("vdu-name"),
+ "vdu_id": vdur["vdu-id-ref"],
+ "interface": [],
+ }
+ )
+ for interface in vdur["interfaces"]:
+ scaling_info["vdu"][-1]["interface"].append(
+ {
+ "name": interface["name"],
+ "ip_address": interface["ip-address"],
+ "mac_address": interface.get("mac-address"),
+ }
+ )
+ # vdu_delete = vdu_scaling_info.pop("vdu-delete")
+
+ # PRE-SCALE BEGIN
+ step = "Executing pre-scale vnf-config-primitive"
+ if scaling_descriptor.get("scaling-config-action"):
+ for scaling_config_action in scaling_descriptor[
+ "scaling-config-action"
+ ]:
+ if (
+ scaling_config_action.get("trigger") == "pre-scale-in"
+ and scaling_type == "SCALE_IN"
+ ) or (
+ scaling_config_action.get("trigger") == "pre-scale-out"
+ and scaling_type == "SCALE_OUT"
+ ):
+ vnf_config_primitive = scaling_config_action[
+ "vnf-config-primitive-name-ref"
+ ]
+ step = db_nslcmop_update[
+ "detailed-status"
+ ] = "executing pre-scale scaling-config-action '{}'".format(
+ vnf_config_primitive
+ )
+
+ # look for primitive
+ for config_primitive in (
+ get_configuration(db_vnfd, db_vnfd["id"]) or {}
+ ).get("config-primitive", ()):
+ if config_primitive["name"] == vnf_config_primitive:
+ break
+ else:
+ raise LcmException(
+ "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
+ "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
+ "primitive".format(scaling_group, vnf_config_primitive)
+ )
+
+ vnfr_params = {"VDU_SCALE_INFO": scaling_info}
+ if db_vnfr.get("additionalParamsForVnf"):
+ vnfr_params.update(db_vnfr["additionalParamsForVnf"])
+
+ scale_process = "VCA"
+ db_nsr_update["config-status"] = "configuring pre-scaling"
+ primitive_params = self._map_primitive_params(
+ config_primitive, {}, vnfr_params
+ )
+
+ # Pre-scale retry check: Check if this sub-operation has been executed before
+ op_index = self._check_or_add_scale_suboperation(
+ db_nslcmop,
+ vnf_index,
+ vnf_config_primitive,
+ primitive_params,
+ "PRE-SCALE",
+ )
+ if op_index == self.SUBOPERATION_STATUS_SKIP:
+ # Skip sub-operation
+ result = "COMPLETED"
+ result_detail = "Done"
+ self.logger.debug(
+ logging_text
+ + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
+ vnf_config_primitive, result, result_detail
+ )
+ )
+ else:
+ if op_index == self.SUBOPERATION_STATUS_NEW:
+ # New sub-operation: Get index of this sub-operation
+ op_index = (
+ len(db_nslcmop.get("_admin", {}).get("operations"))
+ - 1
+ )
+ self.logger.debug(
+ logging_text
+ + "vnf_config_primitive={} New sub-operation".format(
+ vnf_config_primitive
+ )
+ )
+ else:
+ # retry: Get registered params for this existing sub-operation
+ op = db_nslcmop.get("_admin", {}).get("operations", [])[
+ op_index
+ ]
+ vnf_index = op.get("member_vnf_index")
+ vnf_config_primitive = op.get("primitive")
+ primitive_params = op.get("primitive_params")
+ self.logger.debug(
+ logging_text
+ + "vnf_config_primitive={} Sub-operation retry".format(
+ vnf_config_primitive
+ )
+ )
+ # Execute the primitive, either with new (first-time) or registered (reintent) args
+ ee_descriptor_id = config_primitive.get(
+ "execution-environment-ref"
+ )
+ primitive_name = config_primitive.get(
+ "execution-environment-primitive", vnf_config_primitive
+ )
+ ee_id, vca_type = self._look_for_deployed_vca(
+ nsr_deployed["VCA"],
+ member_vnf_index=vnf_index,
+ vdu_id=None,
+ vdu_count_index=None,
+ ee_descriptor_id=ee_descriptor_id,
+ )
+ result, result_detail = await self._ns_execute_primitive(
+ ee_id,
+ primitive_name,
+ primitive_params,
+ vca_type=vca_type,
+ vca_id=vca_id,
+ )
+ self.logger.debug(
+ logging_text
+ + "vnf_config_primitive={} Done with result {} {}".format(
+ vnf_config_primitive, result, result_detail
+ )
+ )
+ # Update operationState = COMPLETED | FAILED
+ self._update_suboperation_status(
+ db_nslcmop, op_index, result, result_detail
+ )
+
+ if result == "FAILED":
+ raise LcmException(result_detail)
+ db_nsr_update["config-status"] = old_config_status
+ scale_process = None
+ # PRE-SCALE END
+
+ db_nsr_update[
+ "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
+ ] = nb_scale_op
+ db_nsr_update[
+ "_admin.scaling-group.{}.time".format(admin_scale_index)
+ ] = time()
+
+ # SCALE-IN VCA - BEGIN
+ if vca_scaling_info:
+ step = db_nslcmop_update[
+ "detailed-status"
+ ] = "Deleting the execution environments"
+ scale_process = "VCA"
+ for vca_info in vca_scaling_info:
+ if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
+ member_vnf_index = str(vca_info["member-vnf-index"])
+ self.logger.debug(
+ logging_text + "vdu info: {}".format(vca_info)
+ )
+ if vca_info.get("osm_vdu_id"):
+ vdu_id = vca_info["osm_vdu_id"]
+ vdu_index = int(vca_info["vdu_index"])
+ stage[
+ 1
+ ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+ member_vnf_index, vdu_id, vdu_index
+ )
+ stage[2] = step = "Scaling in VCA"
+ self._write_op_status(op_id=nslcmop_id, stage=stage)
+ vca_update = db_nsr["_admin"]["deployed"]["VCA"]
+ config_update = db_nsr["configurationStatus"]
+ for vca_index, vca in enumerate(vca_update):
+ if (
+ (vca or vca.get("ee_id"))
+ and vca["member-vnf-index"] == member_vnf_index
+ and vca["vdu_count_index"] == vdu_index
+ ):
+ if vca.get("vdu_id"):
+ config_descriptor = get_configuration(
+ db_vnfd, vca.get("vdu_id")
+ )
+ elif vca.get("kdu_name"):
+ config_descriptor = get_configuration(
+ db_vnfd, vca.get("kdu_name")
+ )
+ else:
+ config_descriptor = get_configuration(
+ db_vnfd, db_vnfd["id"]
+ )
+ operation_params = (
+ db_nslcmop.get("operationParams") or {}
+ )
+ exec_terminate_primitives = not operation_params.get(
+ "skip_terminate_primitives"
+ ) and vca.get("needed_terminate")
+ task = asyncio.ensure_future(
+ asyncio.wait_for(
+ self.destroy_N2VC(
+ logging_text,
+ db_nslcmop,
+ vca,
+ config_descriptor,
+ vca_index,
+ destroy_ee=True,
+ exec_primitives=exec_terminate_primitives,
+ scaling_in=True,
+ vca_id=vca_id,
+ ),
+ timeout=self.timeout_charm_delete,
+ )
+ )
+ tasks_dict_info[task] = "Terminating VCA {}".format(
+ vca.get("ee_id")
+ )
+ del vca_update[vca_index]
+ del config_update[vca_index]
+ # wait for pending tasks of terminate primitives
+ if tasks_dict_info:
+ self.logger.debug(
+ logging_text
+ + "Waiting for tasks {}".format(
+ list(tasks_dict_info.keys())
+ )
+ )
+ error_list = await self._wait_for_tasks(
+ logging_text,
+ tasks_dict_info,
+ min(
+ self.timeout_charm_delete, self.timeout_ns_terminate
+ ),
+ stage,
+ nslcmop_id,
+ )
+ tasks_dict_info.clear()
+ if error_list:
+ raise LcmException("; ".join(error_list))
+
+ db_vca_and_config_update = {
+ "_admin.deployed.VCA": vca_update,
+ "configurationStatus": config_update,
+ }
+ self.update_db_2(
+ "nsrs", db_nsr["_id"], db_vca_and_config_update
+ )
+ scale_process = None
+ # SCALE-IN VCA - END
+
+ # SCALE RO - BEGIN
+ if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
+ scale_process = "RO"
+ if self.ro_config.get("ng"):
+ await self._scale_ng_ro(
+ logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
+ )
+ scaling_info.pop("vdu-create", None)
+ scaling_info.pop("vdu-delete", None)
+
+ scale_process = None
+ # SCALE RO - END
+
+ # SCALE KDU - BEGIN
+ if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
+ scale_process = "KDU"
+ await self._scale_kdu(
+ logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
+ )
+ scaling_info.pop("kdu-create", None)
+ scaling_info.pop("kdu-delete", None)
+
+ scale_process = None
+ # SCALE KDU - END
+
+ if db_nsr_update:
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+
+ # SCALE-UP VCA - BEGIN
+ if vca_scaling_info:
+ step = db_nslcmop_update[
+ "detailed-status"
+ ] = "Creating new execution environments"
+ scale_process = "VCA"
+ for vca_info in vca_scaling_info:
+ if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
+ member_vnf_index = str(vca_info["member-vnf-index"])
+ self.logger.debug(
+ logging_text + "vdu info: {}".format(vca_info)
+ )
+ vnfd_id = db_vnfr["vnfd-ref"]
+ if vca_info.get("osm_vdu_id"):
+ vdu_index = int(vca_info["vdu_index"])
+ deploy_params = {"OSM": get_osm_params(db_vnfr)}
+ if db_vnfr.get("additionalParamsForVnf"):
+ deploy_params.update(
+ parse_yaml_strings(
+ db_vnfr["additionalParamsForVnf"].copy()
+ )
+ )
+ descriptor_config = get_configuration(
+ db_vnfd, db_vnfd["id"]
+ )
+ if descriptor_config:
+ vdu_id = None
+ vdu_name = None
+ kdu_name = None
+ self._deploy_n2vc(
+ logging_text=logging_text
+ + "member_vnf_index={} ".format(member_vnf_index),
+ db_nsr=db_nsr,
+ db_vnfr=db_vnfr,
+ nslcmop_id=nslcmop_id,
+ nsr_id=nsr_id,
+ nsi_id=nsi_id,
+ vnfd_id=vnfd_id,
+ vdu_id=vdu_id,
+ kdu_name=kdu_name,
+ member_vnf_index=member_vnf_index,
+ vdu_index=vdu_index,
+ vdu_name=vdu_name,
+ deploy_params=deploy_params,
+ descriptor_config=descriptor_config,
+ base_folder=base_folder,
+ task_instantiation_info=tasks_dict_info,
+ stage=stage,
+ )
+ vdu_id = vca_info["osm_vdu_id"]
+ vdur = find_in_list(
+ db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
+ )
+ descriptor_config = get_configuration(db_vnfd, vdu_id)
+ if vdur.get("additionalParams"):
+ deploy_params_vdu = parse_yaml_strings(
+ vdur["additionalParams"]
+ )
+ else:
+ deploy_params_vdu = deploy_params
+ deploy_params_vdu["OSM"] = get_osm_params(
+ db_vnfr, vdu_id, vdu_count_index=vdu_index
+ )
+ if descriptor_config:
+ vdu_name = None
+ kdu_name = None
+ stage[
+ 1
+ ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+ member_vnf_index, vdu_id, vdu_index
+ )
+ stage[2] = step = "Scaling out VCA"
+ self._write_op_status(op_id=nslcmop_id, stage=stage)
+ self._deploy_n2vc(
+ logging_text=logging_text
+ + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+ member_vnf_index, vdu_id, vdu_index
+ ),
+ db_nsr=db_nsr,
+ db_vnfr=db_vnfr,
+ nslcmop_id=nslcmop_id,
+ nsr_id=nsr_id,
+ nsi_id=nsi_id,
+ vnfd_id=vnfd_id,
+ vdu_id=vdu_id,
+ kdu_name=kdu_name,
+ member_vnf_index=member_vnf_index,
+ vdu_index=vdu_index,
+ vdu_name=vdu_name,
+ deploy_params=deploy_params_vdu,
+ descriptor_config=descriptor_config,
+ base_folder=base_folder,
+ task_instantiation_info=tasks_dict_info,
+ stage=stage,
+ )
+ # SCALE-UP VCA - END
+ scale_process = None
+
+ # POST-SCALE BEGIN
+ # execute primitive service POST-SCALING
+ step = "Executing post-scale vnf-config-primitive"
+ if scaling_descriptor.get("scaling-config-action"):
+ for scaling_config_action in scaling_descriptor[
+ "scaling-config-action"
+ ]:
+ if (
+ scaling_config_action.get("trigger") == "post-scale-in"
+ and scaling_type == "SCALE_IN"
+ ) or (
+ scaling_config_action.get("trigger") == "post-scale-out"
+ and scaling_type == "SCALE_OUT"
+ ):
+ vnf_config_primitive = scaling_config_action[
+ "vnf-config-primitive-name-ref"
+ ]
+ step = db_nslcmop_update[
+ "detailed-status"
+ ] = "executing post-scale scaling-config-action '{}'".format(
+ vnf_config_primitive
+ )
+
+ vnfr_params = {"VDU_SCALE_INFO": scaling_info}
+ if db_vnfr.get("additionalParamsForVnf"):
+ vnfr_params.update(db_vnfr["additionalParamsForVnf"])
+
+ # look for primitive
+ for config_primitive in (
+ get_configuration(db_vnfd, db_vnfd["id"]) or {}
+ ).get("config-primitive", ()):
+ if config_primitive["name"] == vnf_config_primitive:
+ break
+ else:
+ raise LcmException(
+ "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
+ "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
+ "config-primitive".format(
+ scaling_group, vnf_config_primitive
+ )
+ )
+ scale_process = "VCA"
+ db_nsr_update["config-status"] = "configuring post-scaling"
+ primitive_params = self._map_primitive_params(
+ config_primitive, {}, vnfr_params
+ )
+
+ # Post-scale retry check: Check if this sub-operation has been executed before
+ op_index = self._check_or_add_scale_suboperation(
+ db_nslcmop,
+ vnf_index,
+ vnf_config_primitive,
+ primitive_params,
+ "POST-SCALE",
+ )
+ if op_index == self.SUBOPERATION_STATUS_SKIP:
+ # Skip sub-operation
+ result = "COMPLETED"
+ result_detail = "Done"
+ self.logger.debug(
+ logging_text
+ + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
+ vnf_config_primitive, result, result_detail
+ )
+ )
+ else:
+ if op_index == self.SUBOPERATION_STATUS_NEW:
+ # New sub-operation: Get index of this sub-operation
+ op_index = (
+ len(db_nslcmop.get("_admin", {}).get("operations"))
+ - 1
+ )
+ self.logger.debug(
+ logging_text
+ + "vnf_config_primitive={} New sub-operation".format(
+ vnf_config_primitive
+ )
+ )
+ else:
+ # retry: Get registered params for this existing sub-operation
+ op = db_nslcmop.get("_admin", {}).get("operations", [])[
+ op_index
+ ]
+ vnf_index = op.get("member_vnf_index")
+ vnf_config_primitive = op.get("primitive")
+ primitive_params = op.get("primitive_params")
+ self.logger.debug(
+ logging_text
+ + "vnf_config_primitive={} Sub-operation retry".format(
+ vnf_config_primitive
+ )
+ )
+ # Execute the primitive, either with new (first-time) or registered (reintent) args
+ ee_descriptor_id = config_primitive.get(
+ "execution-environment-ref"
+ )
+ primitive_name = config_primitive.get(
+ "execution-environment-primitive", vnf_config_primitive
+ )
+ ee_id, vca_type = self._look_for_deployed_vca(
+ nsr_deployed["VCA"],
+ member_vnf_index=vnf_index,
+ vdu_id=None,
+ vdu_count_index=None,
+ ee_descriptor_id=ee_descriptor_id,
+ )
+ result, result_detail = await self._ns_execute_primitive(
+ ee_id,
+ primitive_name,
+ primitive_params,
+ vca_type=vca_type,
+ vca_id=vca_id,
+ )
+ self.logger.debug(
+ logging_text
+ + "vnf_config_primitive={} Done with result {} {}".format(
+ vnf_config_primitive, result, result_detail
+ )
+ )
+ # Update operationState = COMPLETED | FAILED
+ self._update_suboperation_status(
+ db_nslcmop, op_index, result, result_detail
+ )
+
+ if result == "FAILED":
+ raise LcmException(result_detail)
+ db_nsr_update["config-status"] = old_config_status
+ scale_process = None
+ # POST-SCALE END
+
+ db_nsr_update[
+ "detailed-status"
+ ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
+ db_nsr_update["operational-status"] = (
+ "running"
+ if old_operational_status == "failed"
+ else old_operational_status
+ )
+ db_nsr_update["config-status"] = old_config_status
+ return
+ except (
+ ROclient.ROClientException,
+ DbException,
+ LcmException,
+ NgRoException,
+ ) as e:
+ self.logger.error(logging_text + "Exit Exception {}".format(e))
+ exc = e
+ except asyncio.CancelledError:
+ self.logger.error(
+ logging_text + "Cancelled Exception while '{}'".format(step)
+ )
+ exc = "Operation was cancelled"
+ except Exception as e:
+ exc = traceback.format_exc()
+ self.logger.critical(
+ logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
+ exc_info=True,
+ )
+ finally:
+ self._write_ns_status(
+ nsr_id=nsr_id,
+ ns_state=None,
+ current_operation="IDLE",
+ current_operation_id=None,
+ )
+ if tasks_dict_info:
+ stage[1] = "Waiting for instantiate pending tasks."
+ self.logger.debug(logging_text + stage[1])
+ exc = await self._wait_for_tasks(
+ logging_text,
+ tasks_dict_info,
+ self.timeout_ns_deploy,
+ stage,
+ nslcmop_id,
+ nsr_id=nsr_id,
+ )
+ if exc:
+ db_nslcmop_update[
+ "detailed-status"
+ ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
+ nslcmop_operation_state = "FAILED"
+ if db_nsr:
+ db_nsr_update["operational-status"] = old_operational_status
+ db_nsr_update["config-status"] = old_config_status
+ db_nsr_update["detailed-status"] = ""
+ if scale_process:
+ if "VCA" in scale_process:
+ db_nsr_update["config-status"] = "failed"
+ if "RO" in scale_process:
+ db_nsr_update["operational-status"] = "failed"
+ db_nsr_update[
+ "detailed-status"
+ ] = "FAILED scaling nslcmop={} {}: {}".format(
+ nslcmop_id, step, exc
+ )
+ else:
+ error_description_nslcmop = None
+ nslcmop_operation_state = "COMPLETED"
+ db_nslcmop_update["detailed-status"] = "Done"