+ "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
+ ] = nb_scale_op
+ db_nsr_update[
+ "_admin.scaling-group.{}.time".format(admin_scale_index)
+ ] = time()
+
+ # SCALE-IN VCA - BEGIN
+ if vca_scaling_info:
+ step = db_nslcmop_update[
+ "detailed-status"
+ ] = "Deleting the execution environments"
+ scale_process = "VCA"
+ for vca_info in vca_scaling_info:
+ if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
+ member_vnf_index = str(vca_info["member-vnf-index"])
+ self.logger.debug(
+ logging_text + "vdu info: {}".format(vca_info)
+ )
+ if vca_info.get("osm_vdu_id"):
+ vdu_id = vca_info["osm_vdu_id"]
+ vdu_index = int(vca_info["vdu_index"])
+ stage[
+ 1
+ ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+ member_vnf_index, vdu_id, vdu_index
+ )
+ stage[2] = step = "Scaling in VCA"
+ self._write_op_status(op_id=nslcmop_id, stage=stage)
+ vca_update = db_nsr["_admin"]["deployed"]["VCA"]
+ config_update = db_nsr["configurationStatus"]
+ for vca_index, vca in enumerate(vca_update):
+ if (
+ (vca or vca.get("ee_id"))
+ and vca["member-vnf-index"] == member_vnf_index
+ and vca["vdu_count_index"] == vdu_index
+ ):
+ if vca.get("vdu_id"):
+ config_descriptor = get_configuration(
+ db_vnfd, vca.get("vdu_id")
+ )
+ elif vca.get("kdu_name"):
+ config_descriptor = get_configuration(
+ db_vnfd, vca.get("kdu_name")
+ )
+ else:
+ config_descriptor = get_configuration(
+ db_vnfd, db_vnfd["id"]
+ )
+ operation_params = (
+ db_nslcmop.get("operationParams") or {}
+ )
+ exec_terminate_primitives = not operation_params.get(
+ "skip_terminate_primitives"
+ ) and vca.get("needed_terminate")
+ task = asyncio.ensure_future(
+ asyncio.wait_for(
+ self.destroy_N2VC(
+ logging_text,
+ db_nslcmop,
+ vca,
+ config_descriptor,
+ vca_index,
+ destroy_ee=True,
+ exec_primitives=exec_terminate_primitives,
+ scaling_in=True,
+ vca_id=vca_id,
+ ),
+ timeout=self.timeout_charm_delete,
+ )
+ )
+ tasks_dict_info[task] = "Terminating VCA {}".format(
+ vca.get("ee_id")
+ )
+ del vca_update[vca_index]
+ del config_update[vca_index]
+ # wait for pending tasks of terminate primitives
+ if tasks_dict_info:
+ self.logger.debug(
+ logging_text
+ + "Waiting for tasks {}".format(
+ list(tasks_dict_info.keys())
+ )
+ )
+ error_list = await self._wait_for_tasks(
+ logging_text,
+ tasks_dict_info,
+ min(
+ self.timeout_charm_delete, self.timeout_ns_terminate
+ ),
+ stage,
+ nslcmop_id,
+ )
+ tasks_dict_info.clear()
+ if error_list:
+ raise LcmException("; ".join(error_list))
+
+ db_vca_and_config_update = {
+ "_admin.deployed.VCA": vca_update,
+ "configurationStatus": config_update,
+ }
+ self.update_db_2(
+ "nsrs", db_nsr["_id"], db_vca_and_config_update
+ )
+ scale_process = None
+ # SCALE-IN VCA - END
+
+ # SCALE RO - BEGIN
+ if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
+ scale_process = "RO"
+ if self.ro_config.get("ng"):
+ await self._scale_ng_ro(
+ logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
+ )
+ scaling_info.pop("vdu-create", None)
+ scaling_info.pop("vdu-delete", None)
+
+ scale_process = None
+ # SCALE RO - END
+
+ # SCALE KDU - BEGIN
+ if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
+ scale_process = "KDU"
+ await self._scale_kdu(
+ logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
+ )
+ scaling_info.pop("kdu-create", None)
+ scaling_info.pop("kdu-delete", None)
+
+ scale_process = None
+ # SCALE KDU - END
+
+ if db_nsr_update:
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+
+ # SCALE-UP VCA - BEGIN
+ if vca_scaling_info:
+ step = db_nslcmop_update[
+ "detailed-status"
+ ] = "Creating new execution environments"
+ scale_process = "VCA"
+ for vca_info in vca_scaling_info:
+ if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
+ member_vnf_index = str(vca_info["member-vnf-index"])
+ self.logger.debug(
+ logging_text + "vdu info: {}".format(vca_info)
+ )
+ vnfd_id = db_vnfr["vnfd-ref"]
+ if vca_info.get("osm_vdu_id"):
+ vdu_index = int(vca_info["vdu_index"])
+ deploy_params = {"OSM": get_osm_params(db_vnfr)}
+ if db_vnfr.get("additionalParamsForVnf"):
+ deploy_params.update(
+ parse_yaml_strings(
+ db_vnfr["additionalParamsForVnf"].copy()
+ )
+ )
+ descriptor_config = get_configuration(
+ db_vnfd, db_vnfd["id"]
+ )
+ if descriptor_config:
+ vdu_id = None
+ vdu_name = None
+ kdu_name = None
+ self._deploy_n2vc(
+ logging_text=logging_text
+ + "member_vnf_index={} ".format(member_vnf_index),
+ db_nsr=db_nsr,
+ db_vnfr=db_vnfr,
+ nslcmop_id=nslcmop_id,
+ nsr_id=nsr_id,
+ nsi_id=nsi_id,
+ vnfd_id=vnfd_id,
+ vdu_id=vdu_id,
+ kdu_name=kdu_name,
+ member_vnf_index=member_vnf_index,
+ vdu_index=vdu_index,
+ vdu_name=vdu_name,
+ deploy_params=deploy_params,
+ descriptor_config=descriptor_config,
+ base_folder=base_folder,
+ task_instantiation_info=tasks_dict_info,
+ stage=stage,
+ )
+ vdu_id = vca_info["osm_vdu_id"]
+ vdur = find_in_list(
+ db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
+ )
+ descriptor_config = get_configuration(db_vnfd, vdu_id)
+ if vdur.get("additionalParams"):
+ deploy_params_vdu = parse_yaml_strings(
+ vdur["additionalParams"]
+ )
+ else:
+ deploy_params_vdu = deploy_params
+ deploy_params_vdu["OSM"] = get_osm_params(
+ db_vnfr, vdu_id, vdu_count_index=vdu_index
+ )
+ if descriptor_config:
+ vdu_name = None
+ kdu_name = None
+ stage[
+ 1
+ ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+ member_vnf_index, vdu_id, vdu_index
+ )
+ stage[2] = step = "Scaling out VCA"
+ self._write_op_status(op_id=nslcmop_id, stage=stage)
+ self._deploy_n2vc(
+ logging_text=logging_text
+ + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
+ member_vnf_index, vdu_id, vdu_index
+ ),
+ db_nsr=db_nsr,
+ db_vnfr=db_vnfr,
+ nslcmop_id=nslcmop_id,
+ nsr_id=nsr_id,
+ nsi_id=nsi_id,
+ vnfd_id=vnfd_id,
+ vdu_id=vdu_id,
+ kdu_name=kdu_name,
+ member_vnf_index=member_vnf_index,
+ vdu_index=vdu_index,
+ vdu_name=vdu_name,
+ deploy_params=deploy_params_vdu,
+ descriptor_config=descriptor_config,
+ base_folder=base_folder,
+ task_instantiation_info=tasks_dict_info,
+ stage=stage,
+ )
+ # SCALE-UP VCA - END
+ scale_process = None
+
+ # POST-SCALE BEGIN
+ # execute primitive service POST-SCALING
+ step = "Executing post-scale vnf-config-primitive"
+ if scaling_descriptor.get("scaling-config-action"):
+ for scaling_config_action in scaling_descriptor[
+ "scaling-config-action"
+ ]:
+ if (
+ scaling_config_action.get("trigger") == "post-scale-in"
+ and scaling_type == "SCALE_IN"
+ ) or (
+ scaling_config_action.get("trigger") == "post-scale-out"
+ and scaling_type == "SCALE_OUT"
+ ):
+ vnf_config_primitive = scaling_config_action[
+ "vnf-config-primitive-name-ref"
+ ]
+ step = db_nslcmop_update[
+ "detailed-status"
+ ] = "executing post-scale scaling-config-action '{}'".format(
+ vnf_config_primitive
+ )
+
+ vnfr_params = {"VDU_SCALE_INFO": scaling_info}
+ if db_vnfr.get("additionalParamsForVnf"):
+ vnfr_params.update(db_vnfr["additionalParamsForVnf"])
+
+ # look for primitive
+ for config_primitive in (
+ get_configuration(db_vnfd, db_vnfd["id"]) or {}
+ ).get("config-primitive", ()):
+ if config_primitive["name"] == vnf_config_primitive:
+ break
+ else:
+ raise LcmException(
+ "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
+ "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
+ "config-primitive".format(
+ scaling_group, vnf_config_primitive
+ )
+ )
+ scale_process = "VCA"
+ db_nsr_update["config-status"] = "configuring post-scaling"
+ primitive_params = self._map_primitive_params(
+ config_primitive, {}, vnfr_params
+ )
+
+ # Post-scale retry check: Check if this sub-operation has been executed before
+ op_index = self._check_or_add_scale_suboperation(
+ db_nslcmop,
+ vnf_index,
+ vnf_config_primitive,
+ primitive_params,
+ "POST-SCALE",
+ )
+ if op_index == self.SUBOPERATION_STATUS_SKIP:
+ # Skip sub-operation
+ result = "COMPLETED"
+ result_detail = "Done"
+ self.logger.debug(
+ logging_text
+ + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
+ vnf_config_primitive, result, result_detail
+ )
+ )
+ else:
+ if op_index == self.SUBOPERATION_STATUS_NEW:
+ # New sub-operation: Get index of this sub-operation
+ op_index = (
+ len(db_nslcmop.get("_admin", {}).get("operations"))
+ - 1
+ )
+ self.logger.debug(
+ logging_text
+ + "vnf_config_primitive={} New sub-operation".format(
+ vnf_config_primitive
+ )
+ )
+ else:
+ # retry: Get registered params for this existing sub-operation
+ op = db_nslcmop.get("_admin", {}).get("operations", [])[
+ op_index
+ ]
+ vnf_index = op.get("member_vnf_index")
+ vnf_config_primitive = op.get("primitive")
+ primitive_params = op.get("primitive_params")
+ self.logger.debug(
+ logging_text
+ + "vnf_config_primitive={} Sub-operation retry".format(
+ vnf_config_primitive
+ )
+ )
+ # Execute the primitive, either with new (first-time) or registered (reintent) args
+ ee_descriptor_id = config_primitive.get(
+ "execution-environment-ref"
+ )
+ primitive_name = config_primitive.get(
+ "execution-environment-primitive", vnf_config_primitive
+ )
+ ee_id, vca_type = self._look_for_deployed_vca(
+ nsr_deployed["VCA"],
+ member_vnf_index=vnf_index,
+ vdu_id=None,
+ vdu_count_index=None,
+ ee_descriptor_id=ee_descriptor_id,
+ )
+ result, result_detail = await self._ns_execute_primitive(
+ ee_id,
+ primitive_name,
+ primitive_params,
+ vca_type=vca_type,
+ vca_id=vca_id,
+ )
+ self.logger.debug(
+ logging_text
+ + "vnf_config_primitive={} Done with result {} {}".format(
+ vnf_config_primitive, result, result_detail
+ )
+ )
+ # Update operationState = COMPLETED | FAILED
+ self._update_suboperation_status(
+ db_nslcmop, op_index, result, result_detail
+ )
+
+ if result == "FAILED":
+ raise LcmException(result_detail)
+ db_nsr_update["config-status"] = old_config_status
+ scale_process = None
+ # POST-SCALE END
+
+ db_nsr_update[
+ "detailed-status"
+ ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
+ db_nsr_update["operational-status"] = (
+ "running"
+ if old_operational_status == "failed"
+ else old_operational_status
+ )
+ db_nsr_update["config-status"] = old_config_status
+ return
+ except (
+ ROclient.ROClientException,
+ DbException,
+ LcmException,
+ NgRoException,
+ ) as e:
+ self.logger.error(logging_text + "Exit Exception {}".format(e))
+ exc = e
+ except asyncio.CancelledError:
+ self.logger.error(
+ logging_text + "Cancelled Exception while '{}'".format(step)
+ )
+ exc = "Operation was cancelled"
+ except Exception as e:
+ exc = traceback.format_exc()
+ self.logger.critical(
+ logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
+ exc_info=True,
+ )
+ finally:
+ self._write_ns_status(
+ nsr_id=nsr_id,
+ ns_state=None,
+ current_operation="IDLE",
+ current_operation_id=None,
+ )
+ if tasks_dict_info:
+ stage[1] = "Waiting for instantiate pending tasks."
+ self.logger.debug(logging_text + stage[1])
+ exc = await self._wait_for_tasks(
+ logging_text,
+ tasks_dict_info,
+ self.timeout_ns_deploy,
+ stage,
+ nslcmop_id,
+ nsr_id=nsr_id,
+ )
+ if exc:
+ db_nslcmop_update[
+ "detailed-status"
+ ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
+ nslcmop_operation_state = "FAILED"
+ if db_nsr:
+ db_nsr_update["operational-status"] = old_operational_status
+ db_nsr_update["config-status"] = old_config_status
+ db_nsr_update["detailed-status"] = ""
+ if scale_process:
+ if "VCA" in scale_process:
+ db_nsr_update["config-status"] = "failed"
+ if "RO" in scale_process:
+ db_nsr_update["operational-status"] = "failed"
+ db_nsr_update[
+ "detailed-status"
+ ] = "FAILED scaling nslcmop={} {}: {}".format(
+ nslcmop_id, step, exc
+ )
+ else:
+ error_description_nslcmop = None
+ nslcmop_operation_state = "COMPLETED"
+ db_nslcmop_update["detailed-status"] = "Done"
+
+ self._write_op_status(
+ op_id=nslcmop_id,
+ stage="",
+ error_message=error_description_nslcmop,
+ operation_state=nslcmop_operation_state,
+ other_update=db_nslcmop_update,
+ )
+ if db_nsr:
+ self._write_ns_status(
+ nsr_id=nsr_id,
+ ns_state=None,
+ current_operation="IDLE",
+ current_operation_id=None,
+ other_update=db_nsr_update,
+ )
+
+ if nslcmop_operation_state:
+ try:
+ msg = {
+ "nsr_id": nsr_id,
+ "nslcmop_id": nslcmop_id,
+ "operationState": nslcmop_operation_state,
+ }
+ await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
+ except Exception as e:
+ self.logger.error(
+ logging_text + "kafka_write notification Exception {}".format(e)
+ )
+ self.logger.debug(logging_text + "Exit")
+ self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
+
async def _scale_kdu(
    self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
):
    """Scale KDUs according to scaling_info.

    For every KDU entry: on scale-in, first run the descriptor's
    terminate config primitives (only when the KDU has no juju execution
    environment owning them); then resize the KDU through the matching
    K8s connector; on scale-out, run the initial config primitives after
    the resize.

    :param logging_text: prefix to use on every log message
    :param nsr_id: NS record _id, used to address the "nsrs" DB document
    :param nsr_deployed: "_admin.deployed" content of the NS record
    :param db_vnfd: VNF descriptor holding the KDU configuration
    :param vca_id: VCA id passed through to primitive/scale operations
    :param scaling_info: dict with a "kdu-create" or "kdu-delete" map of
        kdu_name -> list of scaling entries (member-vnf-index, scale,
        resource-name, k8s-cluster-type, type)
    """

    async def _exec_config_primitives(
        primitive_list, k8s_cluster_type, cluster_uuid, kdu_instance, db_dict, step
    ):
        """Run the given KDU config primitives in ascending "seq" order."""
        # sort in place by "seq", as the descriptor list order is not guaranteed
        primitive_list.sort(key=lambda val: int(val["seq"]))
        for config_primitive in primitive_list:
            primitive_params_ = self._map_primitive_params(
                config_primitive, {}, {}
            )
            self.logger.debug(logging_text + step)
            await asyncio.wait_for(
                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                    cluster_uuid=cluster_uuid,
                    kdu_instance=kdu_instance,
                    primitive_name=config_primitive["name"],
                    params=primitive_params_,
                    db_dict=db_dict,
                    vca_id=vca_id,
                ),
                timeout=600,  # fixed per-primitive timeout, same as original code
            )

    _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
    for kdu_name in _scaling_info:
        for kdu_scaling_info in _scaling_info[kdu_name]:
            deployed_kdu, index = get_deployed_kdu(
                nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
            )
            cluster_uuid = deployed_kdu["k8scluster-uuid"]
            kdu_instance = deployed_kdu["kdu-instance"]
            kdu_model = deployed_kdu.get("kdu-model")
            scale = int(kdu_scaling_info["scale"])
            k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

            # DB coordinates of the deployed K8s entry, updated by the connector
            db_dict = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": "_admin.deployed.K8s.{}".format(index),
            }

            step = "scaling application {}".format(
                kdu_scaling_info["resource-name"]
            )
            self.logger.debug(logging_text + step)

            if kdu_scaling_info["type"] == "delete":
                kdu_config = get_configuration(db_vnfd, kdu_name)
                if (
                    kdu_config
                    and kdu_config.get("terminate-config-primitive")
                    # primitives handled here only when no juju EE owns them
                    and get_juju_ee_ref(db_vnfd, kdu_name) is None
                ):
                    await _exec_config_primitives(
                        kdu_config["terminate-config-primitive"],
                        k8s_cluster_type,
                        cluster_uuid,
                        kdu_instance,
                        db_dict,
                        "execute terminate config primitive",
                    )

            # resize the KDU; runs for both "create" and "delete" entries
            await asyncio.wait_for(
                self.k8scluster_map[k8s_cluster_type].scale(
                    kdu_instance,
                    scale,
                    kdu_scaling_info["resource-name"],
                    vca_id=vca_id,
                    cluster_uuid=cluster_uuid,
                    kdu_model=kdu_model,
                    atomic=True,
                    db_dict=db_dict,
                ),
                timeout=self.timeout_vca_on_error,
            )

            if kdu_scaling_info["type"] == "create":
                kdu_config = get_configuration(db_vnfd, kdu_name)
                if (
                    kdu_config
                    and kdu_config.get("initial-config-primitive")
                    and get_juju_ee_ref(db_vnfd, kdu_name) is None
                ):
                    await _exec_config_primitives(
                        kdu_config["initial-config-primitive"],
                        k8s_cluster_type,
                        cluster_uuid,
                        kdu_instance,
                        db_dict,
                        "execute initial config primitive",
                    )
+
async def _scale_ng_ro(
    self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
):
    """Scale VDUs through the NG-RO.

    Marks the VDUs to create/delete in the VNF record, triggers the
    NG-RO deployment with the updated records, and finally removes the
    deleted VDUs from the VNF record.

    :param logging_text: prefix for log messages
    :param db_nsr: NS record
    :param db_nslcmop: NS LCM operation record (provides nsInstanceId)
    :param db_vnfr: VNF record of the VNF being scaled (updated in place)
    :param vdu_scaling_info: dict with "vdu-create" / "vdu-delete" entries
    :param stage: list with the current operation stage texts (for op status)
    """
    nsr_id = db_nslcmop["nsInstanceId"]
    db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
    db_vnfrs = {}

    # read from db: vnfd's for every vnf
    db_vnfds = []

    # for each vnf in ns, read vnfd
    for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
        db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
        vnfd_id = vnfr["vnfd-id"]  # vnfd uuid for this vnf
        # Fetch each vnfd only once. vnfd_id is the DB "_id" (uuid, used in
        # get_one below), so the dedup check must compare against "_id";
        # comparing against the descriptor "id" never matches, causing
        # duplicate reads and duplicate entries in db_vnfds.
        if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["_id"] == vnfd_id):
            # read from db
            vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            db_vnfds.append(vnfd)
    n2vc_key = self.n2vc.get_public_key()
    n2vc_key_list = [n2vc_key]
    # mark the scaled VDUs in the VNF record (deletions only flagged here)
    self.scale_vnfr(
        db_vnfr,
        vdu_scaling_info.get("vdu-create"),
        vdu_scaling_info.get("vdu-delete"),
        mark_delete=True,
    )
    # db_vnfr has been updated, update db_vnfrs to use it
    db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
    await self._instantiate_ng_ro(
        logging_text,
        nsr_id,
        db_nsd,
        db_nsr,
        db_nslcmop,
        db_vnfrs,
        db_vnfds,
        n2vc_key_list,
        stage=stage,
        start_deploy=time(),
        timeout_ns_deploy=self.timeout_ns_deploy,
    )
    if vdu_scaling_info.get("vdu-delete"):
        # actually drop the previously marked VDUs from the VNF record
        self.scale_vnfr(
            db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
        )
+
async def extract_prometheus_scrape_jobs(
    self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
):
    """Build the Prometheus scrape-job list from the artifact's template.

    Searches the artifact folder for a file matching 'prometheus*.j2';
    when found, renders it with the exporter/VNF variables and returns
    the resulting job list (returns None when no template exists).
    """
    # look if exist a file called 'prometheus*.j2' and
    job_file = None
    for candidate in self.fs.dir_ls(artifact_path):
        if candidate.startswith("prometheus") and candidate.endswith(".j2"):
            job_file = candidate
            break
    if not job_file:
        return
    with self.fs.file_open((artifact_path, job_file), "r") as template:
        job_data = template.read()

    # TODO get_service
    _, _, service = ee_id.partition(".")  # remove prefix "namespace."
    host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
    host_port = "80"
    vnfr_id = vnfr_id.replace("-", "")
    variables = {
        "JOB_NAME": vnfr_id,
        "TARGET_IP": target_ip,
        "EXPORTER_POD_IP": host_name,
        "EXPORTER_POD_PORT": host_port,
    }
    job_list = parse_job(job_data, variables)
    # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
    for job in job_list:
        name_ok = isinstance(job.get("job_name"), str) and vnfr_id in job["job_name"]
        if not name_ok:
            job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
        job["nsr_id"] = nsr_id
        job["vnfr_id"] = vnfr_id
    return job_list
+
async def rebuild_start_stop(self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type):
    """Run a per-VDU operation (e.g. rebuild/start/stop) through NG-RO.

    :param nsr_id: NS instance id
    :param nslcmop_id: id of the nslcmop record driving this operation
    :param vnf_id: "_id" of the VNF record in the "vnfrs" collection
    :param additional_param: dict with at least "count-index" selecting the VDU
    :param operation_type: RO operation verb; used as the payload key and
        passed verbatim to self.RO.operate
    :return: ("COMPLETED", "Done") on success, ("FAILED", <detail>) otherwise
    """
    logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
    self.logger.info(logging_text + "Enter")
    stage = ["Preparing the environment", ""]
    # database nsrs record
    db_nsr_update = {}
    vdu_vim_name = None
    vim_vm_id = None
    # in case of error, indicates what part of scale was failed to put nsr at error status
    start_deploy = time()
    try:
        db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
        vim_account_id = db_vnfr.get("vim-account-id")
        vim_info_key = "vim:" + vim_account_id
        # locate the VDU record matching the requested count-index
        vdur = find_in_list(
            db_vnfr["vdur"], lambda vdu: vdu["count-index"] == additional_param["count-index"]
        )
        if vdur:
            vdu_vim_name = vdur["name"]
            vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
            # first key of vim_info is taken as the target VIM
            target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
        # NOTE(review): when vdur is None, target_vim stays unbound and
        # vdur["id"] below raises; both end up in the generic except — confirm
        self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
        # wait for any previous tasks in process
        stage[1] = "Waiting for previous operations to terminate"
        self.logger.info(stage[1])
        await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

        stage[1] = "Reading from database."
        self.logger.info(stage[1])
        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state=None,
            current_operation=operation_type.upper(),
            current_operation_id=nslcmop_id
        )
        self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

        # read from db: ns
        stage[1] = "Getting nsr={} from db.".format(nsr_id)
        db_nsr_update["operational-status"] = operation_type
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        # Payload for RO
        desc = {
            operation_type: {
                "vim_vm_id": vim_vm_id,
                "vnf_id": vnf_id,
                "vdu_index": additional_param["count-index"],
                "vdu_id": vdur["id"],
                "target_vim": target_vim,
                "vim_account_id": vim_account_id
            }
        }
        stage[1] = "Sending rebuild request to RO... {}".format(desc)
        self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
        self.logger.info("ro nsr id: {}".format(nsr_id))
        result_dict = await self.RO.operate(nsr_id, desc, operation_type)
        self.logger.info("response from RO: {}".format(result_dict))
        action_id = result_dict["action_id"]
        # poll NG-RO until the action finishes or self.timeout_operate expires
        await self._wait_ng_ro(
            nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_operate
        )
        return "COMPLETED", "Done"
    except (ROclient.ROClientException, DbException, LcmException) as e:
        self.logger.error("Exit Exception {}".format(e))
        exc = e
    except asyncio.CancelledError:
        self.logger.error("Cancelled Exception while '{}'".format(stage))
        exc = "Operation was cancelled"
    except Exception as e:
        exc = traceback.format_exc()
        self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
    # only reached from the except branches above, where exc is always bound
    return "FAILED", "Error in operate VNF {}".format(exc)
+
def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
    """Return the VCA cloud name and credential configured for a VIM account.

    :param: vim_account_id: VIM Account ID

    :return: (cloud_name, cloud_credential); either is None when not configured
    """
    vim_account = VimAccountDB.get_vim_account_with_id(vim_account_id)
    vim_config = vim_account.get("config", {})
    return vim_config.get("vca_cloud"), vim_config.get("vca_cloud_credential")
+
def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
    """Return the VCA K8s cloud name and credential configured for a VIM account.

    :param: vim_account_id: VIM Account ID

    :return: (cloud_name, cloud_credential); either is None when not configured
    """
    vim_account = VimAccountDB.get_vim_account_with_id(vim_account_id)
    vim_config = vim_account.get("config", {})
    return vim_config.get("vca_k8s_cloud"), vim_config.get("vca_k8s_cloud_credential")
+
async def migrate(self, nsr_id, nslcmop_id):
    """
    Migrate VNFs and VDUs instances in a NS

    :param: nsr_id: NS Instance ID
    :param: nslcmop_id: nslcmop ID of migrate

    """
    # Try to lock HA task here
    task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
    if not task_is_locked_by_me:
        # another LCM instance owns this operation; nothing to do here
        return
    logging_text = "Task ns={} migrate ".format(nsr_id)
    self.logger.debug(logging_text + "Enter")
    # get all needed from database
    db_nslcmop = None
    db_nslcmop_update = {}
    nslcmop_operation_state = None
    db_nsr_update = {}
    target = {}
    exc = None
    # in case of error, indicates what part of scale was failed to put nsr at error status
    start_deploy = time()

    try:
        # wait for any previous tasks in process
        step = "Waiting for previous operations to terminate"
        await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state=None,
            current_operation="MIGRATING",
            current_operation_id=nslcmop_id,
        )
        step = "Getting nslcmop from database"
        self.logger.debug(
            step + " after having waited for previous tasks to be completed"
        )
        db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        migrate_params = db_nslcmop.get("operationParams")

        # the RO target is built directly from the operation parameters
        target = {}
        target.update(migrate_params)
        # delegate the migration to NG-RO and wait for the action to finish
        desc = await self.RO.migrate(nsr_id, target)
        self.logger.debug("RO return > {}".format(desc))
        action_id = desc["action_id"]
        await self._wait_ng_ro(
            nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_migrate,
            operation="migrate"
        )
    except (ROclient.ROClientException, DbException, LcmException) as e:
        self.logger.error("Exit Exception {}".format(e))
        exc = e
    except asyncio.CancelledError:
        self.logger.error("Cancelled Exception while '{}'".format(step))
        exc = "Operation was cancelled"
    except Exception as e:
        exc = traceback.format_exc()
        self.logger.critical(
            "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
        )
    finally:
        # always release the "current operation" marker on the NS record
        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state=None,
            current_operation="IDLE",
            current_operation_id=None,
        )
        if exc:
            db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
            nslcmop_operation_state = "FAILED"
        else:
            nslcmop_operation_state = "COMPLETED"
            db_nslcmop_update["detailed-status"] = "Done"
            # NOTE(review): db_nsr_update is filled here but never written back
            # to the "nsrs" collection in this method — confirm this is intended
            db_nsr_update["detailed-status"] = "Done"

        self._write_op_status(
            op_id=nslcmop_id,
            stage="",
            # NOTE(review): error_message is always "" even on failure, unlike
            # the scale flow which reports the error text — confirm intended
            error_message="",
            operation_state=nslcmop_operation_state,
            other_update=db_nslcmop_update,
        )
        if nslcmop_operation_state:
            try:
                # notify subscribers via kafka that the NS has been migrated
                msg = {
                    "nsr_id": nsr_id,
                    "nslcmop_id": nslcmop_id,
                    "operationState": nslcmop_operation_state,
                }
                await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
            except Exception as e:
                # notification failure is logged but does not fail the operation
                self.logger.error(
                    logging_text + "kafka_write notification Exception {}".format(e)
                )
        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
+
+
+ async def heal(self, nsr_id, nslcmop_id):
+ """
+ Heal NS
+
+ :param nsr_id: ns instance to heal
+ :param nslcmop_id: operation to run
+ :return:
+ """
+
+ # Try to lock HA task here
+ task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
+ if not task_is_locked_by_me: