+
    def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id,
                     kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config,
                     base_folder, task_instantiation_info, stage):
        """
        Launch one instantiate_N2VC asyncio task per execution environment found in
        descriptor_config and register every task object.

        For each execution environment the charm/helm information is looked up at
        db_nsr._admin.deployed.VCA; if not found, a new entry is created there and the
        database is updated (fills db_nsr._admin.deployed.VCA.<index>).
        """
        self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id))
        if descriptor_config.get("juju"):  # there is one execution environment of type juju
            ee_list = [descriptor_config]
        elif descriptor_config.get("execution-environment-list"):
            ee_list = descriptor_config.get("execution-environment-list")
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(logging_text + "_deploy_n2vc ee_item juju={}, helm={}".format(ee_item.get('juju'),
                                                                                            ee_item.get("helm-chart")))
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                vca_name = ee_item['juju'].get('charm')
                # a charm reference means a proxy charm in its own container; otherwise native
                vca_type = "lxc_proxy_charm" if ee_item['juju'].get('charm') is not None else "native_charm"
                if ee_item['juju'].get('cloud') == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item['juju'].get('proxy') is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item['helm-chart']
                # helm v3 is the default unless the descriptor explicitly asks for v2
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(logging_text + "skipping non juju neither charm configuration")
                continue

            # look for an existing VCA entry matching this execution environment; the
            # for/else keeps vca_index pointing at the match, or creates a new entry
            vca_index = -1
            for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
                if not vca_deployed:
                    continue
                if vca_deployed.get("member-vnf-index") == member_vnf_index and \
                        vca_deployed.get("vdu_id") == vdu_id and \
                        vca_deployed.get("kdu_name") == kdu_name and \
                        vca_deployed.get("vdu_count_index", 0) == vdu_index and \
                        vca_deployed.get("ee_descriptor_id") == ee_descriptor_id:
                    break
            else:
                # not found, create one.
                target = "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict()
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc)
            task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or "")
+
+ @staticmethod
+ def _get_terminate_config_primitive(primitive_list, vca_deployed):
+ """ Get a sorted terminate config primitive list. In case ee_descriptor_id is present at vca_deployed,
+ it get only those primitives for this execution envirom"""
+
+ primitive_list = primitive_list or []
+ # filter primitives by ee_descriptor_id
+ ee_descriptor_id = vca_deployed.get("ee_descriptor_id")
+ primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
+
+ if primitive_list:
+ primitive_list.sort(key=lambda val: int(val['seq']))
+
+ return primitive_list
+
+ @staticmethod
+ def _create_nslcmop(nsr_id, operation, params):
+ """
+ Creates a ns-lcm-opp content to be stored at database.
+ :param nsr_id: internal id of the instance
+ :param operation: instantiate, terminate, scale, action, ...
+ :param params: user parameters for the operation
+ :return: dictionary following SOL005 format
+ """
+ # Raise exception if invalid arguments
+ if not (nsr_id and operation and params):
+ raise LcmException(
+ "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided")
+ now = time()
+ _id = str(uuid4())
+ nslcmop = {
+ "id": _id,
+ "_id": _id,
+ # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
+ "operationState": "PROCESSING",
+ "statusEnteredTime": now,
+ "nsInstanceId": nsr_id,
+ "lcmOperationType": operation,
+ "startTime": now,
+ "isAutomaticInvocation": False,
+ "operationParams": params,
+ "isCancelPending": False,
+ "links": {
+ "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
+ "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
+ }
+ }
+ return nslcmop
+
+ def _format_additional_params(self, params):
+ params = params or {}
+ for key, value in params.items():
+ if str(value).startswith("!!yaml "):
+ params[key] = yaml.safe_load(value[7:])
+ return params
+
+ def _get_terminate_primitive_params(self, seq, vnf_index):
+ primitive = seq.get('name')
+ primitive_params = {}
+ params = {
+ "member_vnf_index": vnf_index,
+ "primitive": primitive,
+ "primitive_params": primitive_params,
+ }
+ desc_params = {}
+ return self._map_primitive_params(seq, params, desc_params)
+
+ # sub-operations
+
+ def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
+ op = deep_get(db_nslcmop, ('_admin', 'operations'), [])[op_index]
+ if op.get('operationState') == 'COMPLETED':
+ # b. Skip sub-operation
+ # _ns_execute_primitive() or RO.create_action() will NOT be executed
+ return self.SUBOPERATION_STATUS_SKIP
+ else:
+ # c. retry executing sub-operation
+ # The sub-operation exists, and operationState != 'COMPLETED'
+ # Update operationState = 'PROCESSING' to indicate a retry.
+ operationState = 'PROCESSING'
+ detailed_status = 'In progress'
+ self._update_suboperation_status(
+ db_nslcmop, op_index, operationState, detailed_status)
+ # Return the sub-operation index
+ # _ns_execute_primitive() or RO.create_action() will be called from scale()
+ # with arguments extracted from the sub-operation
+ return op_index
+
+ # Find a sub-operation where all keys in a matching dictionary must match
+ # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
+ def _find_suboperation(self, db_nslcmop, match):
+ if db_nslcmop and match:
+ op_list = db_nslcmop.get('_admin', {}).get('operations', [])
+ for i, op in enumerate(op_list):
+ if all(op.get(k) == match[k] for k in match):
+ return i
+ return self.SUBOPERATION_STATUS_NOT_FOUND
+
+ # Update status for a sub-operation given its index
+ def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status):
+ # Update DB for HA tasks
+ q_filter = {'_id': db_nslcmop['_id']}
+ update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState,
+ '_admin.operations.{}.detailed-status'.format(op_index): detailed_status}
+ self.db.set_one("nslcmops",
+ q_filter=q_filter,
+ update_dict=update_dict,
+ fail_on_empty=False)
+
+ # Add sub-operation, return the index of the added sub-operation
+ # Optionally, set operationState, detailed-status, and operationType
+ # Status and type are currently set for 'scale' sub-operations:
+ # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
+ # 'detailed-status' : status message
+ # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
+ # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
+ def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index, vdu_name, primitive,
+ mapped_primitive_params, operationState=None, detailed_status=None, operationType=None,
+ RO_nsr_id=None, RO_scaling_info=None):
+ if not db_nslcmop:
+ return self.SUBOPERATION_STATUS_NOT_FOUND
+ # Get the "_admin.operations" list, if it exists
+ db_nslcmop_admin = db_nslcmop.get('_admin', {})
+ op_list = db_nslcmop_admin.get('operations')
+ # Create or append to the "_admin.operations" list
+ new_op = {'member_vnf_index': vnf_index,
+ 'vdu_id': vdu_id,
+ 'vdu_count_index': vdu_count_index,
+ 'primitive': primitive,
+ 'primitive_params': mapped_primitive_params}
+ if operationState:
+ new_op['operationState'] = operationState
+ if detailed_status:
+ new_op['detailed-status'] = detailed_status
+ if operationType:
+ new_op['lcmOperationType'] = operationType
+ if RO_nsr_id:
+ new_op['RO_nsr_id'] = RO_nsr_id
+ if RO_scaling_info:
+ new_op['RO_scaling_info'] = RO_scaling_info
+ if not op_list:
+ # No existing operations, create key 'operations' with current operation as first list element
+ db_nslcmop_admin.update({'operations': [new_op]})
+ op_list = db_nslcmop_admin.get('operations')
+ else:
+ # Existing operations, append operation to list
+ op_list.append(new_op)
+
+ db_nslcmop_update = {'_admin.operations': op_list}
+ self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update)
+ op_index = len(op_list) - 1
+ return op_index
+
+ # Helper methods for scale() sub-operations
+
+ # pre-scale/post-scale:
+ # Check for 3 different cases:
+ # a. New: First time execution, return SUBOPERATION_STATUS_NEW
+ # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
+ # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
+ def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index, vnf_config_primitive, primitive_params,
+ operationType, RO_nsr_id=None, RO_scaling_info=None):
+ # Find this sub-operation
+ if RO_nsr_id and RO_scaling_info:
+ operationType = 'SCALE-RO'
+ match = {
+ 'member_vnf_index': vnf_index,
+ 'RO_nsr_id': RO_nsr_id,
+ 'RO_scaling_info': RO_scaling_info,
+ }
+ else:
+ match = {
+ 'member_vnf_index': vnf_index,
+ 'primitive': vnf_config_primitive,
+ 'primitive_params': primitive_params,
+ 'lcmOperationType': operationType
+ }
+ op_index = self._find_suboperation(db_nslcmop, match)
+ if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
+ # a. New sub-operation
+ # The sub-operation does not exist, add it.
+ # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
+ # The following parameters are set to None for all kind of scaling:
+ vdu_id = None
+ vdu_count_index = None
+ vdu_name = None
+ if RO_nsr_id and RO_scaling_info:
+ vnf_config_primitive = None
+ primitive_params = None
+ else:
+ RO_nsr_id = None
+ RO_scaling_info = None
+ # Initial status for sub-operation
+ operationState = 'PROCESSING'
+ detailed_status = 'In progress'
+ # Add sub-operation for pre/post-scaling (zero or more operations)
+ self._add_suboperation(db_nslcmop,
+ vnf_index,
+ vdu_id,
+ vdu_count_index,
+ vdu_name,
+ vnf_config_primitive,
+ primitive_params,
+ operationState,
+ detailed_status,
+ operationType,
+ RO_nsr_id,
+ RO_scaling_info)
+ return self.SUBOPERATION_STATUS_NEW
+ else:
+ # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
+ # or op_index (operationState != 'COMPLETED')
+ return self._retry_or_skip_suboperation(db_nslcmop, op_index)
+
+ # Function to return execution_environment id
+
+ def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
+ # TODO vdu_index_count
+ for vca in vca_deployed_list:
+ if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
+ return vca["ee_id"]
+
    async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor,
                           vca_index, destroy_ee=True, exec_primitives=True):
        """
        Execute the terminate primitives and destroy the execution environment (skipped when
        destroy_ee=False).
        :param logging_text: prefix used on every log line
        :param db_nslcmop: database content of the nslcmop being processed
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy now, because all of them will be destroyed at once
        :param exec_primitives: False to not execute terminate primitives, because the config is not
            completed or has not executed properly
        :return: None or exception
        """

        self.logger.debug(
            logging_text + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default kept for entries written before the "type" field existed
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = self._get_terminate_config_primitive(
                config_descriptor.get("terminate-config-primitive"), vca_deployed)
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name"))
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get('name')
                    mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index)

                    # Add sub-operation
                    self._add_suboperation(db_nslcmop,
                                           vnf_index,
                                           vdu_id,
                                           vdu_count_index,
                                           vdu_name,
                                           primitive,
                                           mapped_primitive_params)
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
                                                                                 mapped_primitive_params,
                                                                                 vca_type=vca_type)
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED']
                    if result not in result_ok:
                        raise LcmException("terminate_primitive {} for vnf_member_index={} fails with "
                                           "error {}".format(seq.get("name"), vnf_index, result_detail))
                # mark in the database that this VCA does not need terminate primitives anymore
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(vca_index)
                self.update_db_2("nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False})

        # remove the monitoring jobs this VCA registered, if any
        if vca_deployed.get("prometheus_jobs") and self.prometheus:
            await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"])
+
+ async def _delete_all_N2VC(self, db_nsr: dict):
+ self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
+ namespace = "." + db_nsr["_id"]
+ try:
+ await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
+ except N2VCNotFound: # already deleted. Skip
+ pass
+ self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
+
+ async def _terminate_RO(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
+ """
+ Terminates a deployment from RO
+ :param logging_text:
+ :param nsr_deployed: db_nsr._admin.deployed
+ :param nsr_id:
+ :param nslcmop_id:
+ :param stage: list of string with the content to write on db_nslcmop.detailed-status.
+ this method will update only the index 2, but it will write on database the concatenated content of the list
+ :return:
+ """
+ db_nsr_update = {}
+ failed_detail = []
+ ro_nsr_id = ro_delete_action = None
+ if nsr_deployed and nsr_deployed.get("RO"):
+ ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
+ ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
+ try:
+ if ro_nsr_id:
+ stage[2] = "Deleting ns from VIM."
+ db_nsr_update["detailed-status"] = " ".join(stage)
+ self._write_op_status(nslcmop_id, stage)
+ self.logger.debug(logging_text + stage[2])
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ self._write_op_status(nslcmop_id, stage)
+ desc = await self.RO.delete("ns", ro_nsr_id)
+ ro_delete_action = desc["action_id"]
+ db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = ro_delete_action
+ db_nsr_update["_admin.deployed.RO.nsr_id"] = None
+ db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
+ if ro_delete_action:
+ # wait until NS is deleted from VIM
+ stage[2] = "Waiting ns deleted from VIM."
+ detailed_status_old = None
+ self.logger.debug(logging_text + stage[2] + " RO_id={} ro_delete_action={}".format(ro_nsr_id,
+ ro_delete_action))
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ self._write_op_status(nslcmop_id, stage)
+
+ delete_timeout = 20 * 60 # 20 minutes
+ while delete_timeout > 0:
+ desc = await self.RO.show(
+ "ns",
+ item_id_name=ro_nsr_id,
+ extra_item="action",
+ extra_item_id=ro_delete_action)
+
+ # deploymentStatus
+ self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
+
+ ns_status, ns_status_info = self.RO.check_action_status(desc)
+ if ns_status == "ERROR":
+ raise ROclient.ROClientException(ns_status_info)
+ elif ns_status == "BUILD":
+ stage[2] = "Deleting from VIM {}".format(ns_status_info)
+ elif ns_status == "ACTIVE":
+ db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
+ db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
+ break
+ else:
+ assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
+ if stage[2] != detailed_status_old:
+ detailed_status_old = stage[2]
+ db_nsr_update["detailed-status"] = " ".join(stage)
+ self._write_op_status(nslcmop_id, stage)
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ await asyncio.sleep(5, loop=self.loop)
+ delete_timeout -= 5
+ else: # delete_timeout <= 0:
+ raise ROclient.ROClientException("Timeout waiting ns deleted from VIM")
+
+ except Exception as e:
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
+ db_nsr_update["_admin.deployed.RO.nsr_id"] = None
+ db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
+ db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
+ self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id))
+ elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
+ failed_detail.append("delete conflict: {}".format(e))
+ self.logger.debug(logging_text + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e))
+ else:
+ failed_detail.append("delete error: {}".format(e))
+ self.logger.error(logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e))
+
+ # Delete nsd
+ if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
+ ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
+ try:
+ stage[2] = "Deleting nsd from RO."
+ db_nsr_update["detailed-status"] = " ".join(stage)
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ self._write_op_status(nslcmop_id, stage)
+ await self.RO.delete("nsd", ro_nsd_id)
+ self.logger.debug(logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id))
+ db_nsr_update["_admin.deployed.RO.nsd_id"] = None
+ except Exception as e:
+ if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
+ db_nsr_update["_admin.deployed.RO.nsd_id"] = None
+ self.logger.debug(logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id))
+ elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
+ failed_detail.append("ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e))
+ self.logger.debug(logging_text + failed_detail[-1])
+ else:
+ failed_detail.append("ro_nsd_id={} delete error: {}".format(ro_nsd_id, e))
+ self.logger.error(logging_text + failed_detail[-1])
+
+ if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
+ for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
+ if not vnf_deployed or not vnf_deployed["id"]:
+ continue