+
async def _scale_ng_ro(self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage):
    """Scale a VNF (add/remove VDUs) through the NG-RO by re-running instantiation.

    :param logging_text: prefix for log messages
    :param db_nsr: NS record read from the database
    :param db_nslcmop: NS LCM operation record being processed
    :param db_vnfr: VNF record of the VNF being scaled (updated in place by scale_vnfr)
    :param vdu_scaling_info: dict with optional "vdu-create"/"vdu-delete" maps
    :param stage: progress-reporting object passed through to _instantiate_ng_ro
    """
    nsr_id = db_nslcmop["nsInstanceId"]
    db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

    # read from db: vnfr's and vnfd's for every vnf in the ns
    db_vnfrs = {}  # every vnfr indexed by member-vnf-index
    db_vnfds = {}  # every vnfd indexed by vnfd id (fix: was initialized twice)
    for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
        db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
        vnfd_id = vnfr["vnfd-id"]  # vnfd uuid for this vnf
        # if we haven't read this vnfd yet, fetch it from db
        if vnfd_id not in db_vnfds:
            db_vnfds[vnfd_id] = self.db.get_one("vnfds", {"_id": vnfd_id})

    n2vc_key = self.n2vc.get_public_key()
    n2vc_key_list = [n2vc_key]
    # mark the VDUs to create/delete in the VNFR before invoking RO
    self.scale_vnfr(db_vnfr, vdu_scaling_info.get("vdu-create"), vdu_scaling_info.get("vdu-delete"),
                    mark_delete=True)
    # db_vnfr has been updated, update db_vnfrs to use it
    db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
    await self._instantiate_ng_ro(logging_text, nsr_id, db_nsd, db_nsr, db_nslcmop, db_vnfrs,
                                  db_vnfds, n2vc_key_list, stage=stage, start_deploy=time(),
                                  timeout_ns_deploy=self.timeout_ns_deploy)
    if vdu_scaling_info.get("vdu-delete"):
        # RO has removed the VDUs; now remove them from the VNFR for real
        self.scale_vnfr(db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False)
+
async def _RO_scale(self, logging_text, RO_nsr_id, RO_scaling_info, db_nslcmop, db_vnfr, db_nslcmop_update,
                    vdu_scaling_info):
    """Execute a VDU scaling action at classic RO and poll until it finishes.

    :param logging_text: prefix for log messages
    :param RO_nsr_id: RO identifier of the network service
    :param RO_scaling_info: list of vdu-scaling items to send to RO
    :param db_nslcmop: NS LCM operation record being processed
    :param db_vnfr: VNF record of the VNF being scaled
    :param db_nslcmop_update: dict collecting field updates for the nslcmop record
    :param vdu_scaling_info: dict with "scaling_direction", optional "vdu-create"/"vdu-delete"
        maps, and a "vdu" list that is filled here with the ip/mac of newly created VDUs
    :raises ROclient.ROClientException: on RO error or on timeout waiting for the ns
    """
    nslcmop_id = db_nslcmop["_id"]
    nsr_id = db_nslcmop["nsInstanceId"]
    vdu_create = vdu_scaling_info.get("vdu-create")
    vdu_delete = vdu_scaling_info.get("vdu-delete")
    # Scale RO retry check: Check if this sub-operation has been executed before
    op_index = self._check_or_add_scale_suboperation(
        db_nslcmop, db_vnfr["member-vnf-index-ref"], None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info)
    if op_index == self.SUBOPERATION_STATUS_SKIP:
        # Skip sub-operation
        result = 'COMPLETED'
        result_detail = 'Done'
        self.logger.debug(logging_text + "Skipped sub-operation RO, result {} {}".format(result, result_detail))
    else:
        if op_index == self.SUBOPERATION_STATUS_NEW:
            # New sub-operation: Get index of this sub-operation
            # fix: use a default [] as at the retry branch below, so a missing
            # 'operations' key cannot raise TypeError on len(None)
            op_index = len(db_nslcmop.get('_admin', {}).get('operations', [])) - 1
            self.logger.debug(logging_text + "New sub-operation RO")
        else:
            # retry: Get registered params for this existing sub-operation
            op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
            RO_nsr_id = op.get('RO_nsr_id')
            RO_scaling_info = op.get('RO_scaling_info')
            self.logger.debug(logging_text + "Sub-operation RO retry")

        RO_desc = await self.RO.create_action("ns", RO_nsr_id, {"vdu-scaling": RO_scaling_info})
        # wait until ready
        RO_nslcmop_id = RO_desc["instance_action_id"]
        db_nslcmop_update["_admin.deploy.RO"] = RO_nslcmop_id

        RO_task_done = False
        step = detailed_status = "Waiting for VIM to scale. RO_task_id={}.".format(RO_nslcmop_id)
        detailed_status_old = None
        self.logger.debug(logging_text + step)

        deployment_timeout = 1 * 3600  # One hour
        while deployment_timeout > 0:
            if not RO_task_done:
                # phase 1: poll the RO action until the VIM has finished scaling
                desc = await self.RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
                                          extra_item_id=RO_nslcmop_id)

                # deploymentStatus
                self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                ns_status, ns_status_info = self.RO.check_action_status(desc)
                if ns_status == "ERROR":
                    raise ROclient.ROClientException(ns_status_info)
                elif ns_status == "BUILD":
                    detailed_status = step + "; {}".format(ns_status_info)
                elif ns_status == "ACTIVE":
                    RO_task_done = True
                    # VIM side done: reflect created/deleted VDUs in the VNFR
                    self.scale_vnfr(db_vnfr, vdu_create=vdu_create, vdu_delete=vdu_delete)
                    step = detailed_status = "Waiting ns ready at RO. RO_id={}".format(RO_nsr_id)
                    self.logger.debug(logging_text + step)
                else:
                    assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
            else:
                # phase 2: wait for the ns to be ready and the mgmt IPs to be reported
                desc = await self.RO.show("ns", RO_nsr_id)
                ns_status, ns_status_info = self.RO.check_ns_status(desc)
                # deploymentStatus
                self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                if ns_status == "ERROR":
                    raise ROclient.ROClientException(ns_status_info)
                elif ns_status == "BUILD":
                    detailed_status = step + "; {}".format(ns_status_info)
                elif ns_status == "ACTIVE":
                    step = detailed_status = \
                        "Waiting for management IP address reported by the VIM. Updating VNFRs"
                    try:
                        # nsr_deployed["nsr_ip"] = RO.get_ns_vnf_info(desc)
                        self.ns_update_vnfr({db_vnfr["member-vnf-index-ref"]: db_vnfr}, desc)
                        break
                    except LcmExceptionNoMgmtIP:
                        # mgmt IP not reported yet; keep polling
                        pass
                else:
                    assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
            if detailed_status != detailed_status_old:
                self._update_suboperation_status(
                    db_nslcmop, op_index, 'COMPLETED', detailed_status)
                detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status
                self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)

            await asyncio.sleep(5, loop=self.loop)
            deployment_timeout -= 5
        if deployment_timeout <= 0:
            # fix: _update_suboperation_status takes (db_nslcmop, op_index, status, detail);
            # a spurious extra nslcmop_id argument was being passed here, which would raise
            # TypeError instead of recording the FAILED status
            self._update_suboperation_status(
                db_nslcmop, op_index, 'FAILED', "Timeout when waiting for ns to get ready")
            raise ROclient.ROClientException("Timeout waiting ns to be ready")

        # update VDU_SCALING_INFO with the obtained ip_addresses
        if vdu_scaling_info["scaling_direction"] == "OUT":
            # walk vdur backwards so the most recently added VDUs are matched first
            for vdur in reversed(db_vnfr["vdur"]):
                if vdu_scaling_info["vdu-create"].get(vdur["vdu-id-ref"]):
                    vdu_scaling_info["vdu-create"][vdur["vdu-id-ref"]] -= 1
                    vdu_scaling_info["vdu"].append({
                        "name": vdur["name"] or vdur.get("vdu-name"),
                        "vdu_id": vdur["vdu-id-ref"],
                        "interface": []
                    })
                    for interface in vdur["interfaces"]:
                        vdu_scaling_info["vdu"][-1]["interface"].append({
                            "name": interface["name"],
                            "ip_address": interface["ip-address"],
                            "mac_address": interface.get("mac-address"),
                        })
        self._update_suboperation_status(db_nslcmop, op_index, 'COMPLETED', 'Done')
+
async def add_prometheus_metrics(self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip):
    """Register Prometheus scrape jobs rendered from a 'prometheus*.j2' artifact.

    Does nothing when Prometheus is not configured or no template artifact exists.
    On a successful update, returns the list of registered job names.
    """
    if not self.prometheus:
        return
    # look for a template file called 'prometheus*.j2' among the artifacts
    artifact_files = self.fs.dir_ls(artifact_path)
    template_name = None
    for candidate in artifact_files:
        if candidate.startswith("prometheus") and candidate.endswith(".j2"):
            template_name = candidate
            break
    if not template_name:
        return
    with self.fs.file_open((artifact_path, template_name), "r") as template_file:
        raw_job_data = template_file.read()

    # TODO get_service
    _, _, service = ee_id.partition(".")  # remove prefix "namespace."
    host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
    host_port = "80"
    vnfr_id = vnfr_id.replace("-", "")
    substitutions = {
        "JOB_NAME": vnfr_id,
        "TARGET_IP": target_ip,
        "EXPORTER_POD_IP": host_name,
        "EXPORTER_POD_PORT": host_port,
    }
    parsed_jobs = self.prometheus.parse_job(raw_job_data, substitutions)
    # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
    for parsed in parsed_jobs:
        current_name = parsed.get("job_name")
        if not isinstance(current_name, str) or vnfr_id not in current_name:
            parsed["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
        parsed["nsr_id"] = nsr_id
    jobs_by_name = {entry["job_name"]: entry for entry in parsed_jobs}
    if await self.prometheus.update(jobs_by_name):
        return list(jobs_by_name.keys())
+
def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
    """
    Get VCA Cloud and VCA Cloud Credentials for the VIM account

    :param: vim_account_id: VIM Account ID

    :return: (cloud_name, cloud_credential)
    """
    vim_account = VimAccountDB.get_vim_account_with_id(vim_account_id)
    vim_config = vim_account.get("config", {})
    return vim_config.get("vca_cloud"), vim_config.get("vca_cloud_credential")
+
def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
    """
    Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account

    :param: vim_account_id: VIM Account ID

    :return: (cloud_name, cloud_credential)
    """
    vim_account = VimAccountDB.get_vim_account_with_id(vim_account_id)
    vim_config = vim_account.get("config", {})
    return vim_config.get("vca_k8s_cloud"), vim_config.get("vca_k8s_cloud_credential")