old_operational_status = ""
old_config_status = ""
nsi_id = None
+ prom_job_name = ""
try:
# wait for any previous tasks in process
step = "Waiting for previous operations to terminate"
vdud = get_vdu(db_vnfd, vdu_delta["id"])
# vdu_index also provides the number of instances of the targeted vdu
vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
+ if vdu_index <= len(db_vnfr["vdur"]):
+ vdu_name_id = db_vnfr["vdur"][vdu_index - 1]["vdu-name"]
+ prom_job_name = (
+ db_vnfr["_id"] + vdu_name_id + str(vdu_index - 1)
+ )
+ prom_job_name = prom_job_name.replace("_", "")
+ prom_job_name = prom_job_name.replace("-", "")
+ else:
+ prom_job_name = None
cloud_init_text = self._get_vdu_cloud_init_content(
vdud, db_vnfd
)
db_nsr_update["config-status"] = old_config_status
scale_process = None
# POST-SCALE END
+ # If the VNF descriptor declares exporter endpoints for metric collection, update the Prometheus job records accordingly
+ if scaling_type == "SCALE_OUT":
+ if "exporters-endpoints" in db_vnfd.get("df")[0]:
+ vnfr_id = db_vnfr["id"]
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+ exporter_config = db_vnfd.get("df")[0].get("exporters-endpoints")
+ self.logger.debug("exporter config :{}".format(exporter_config))
+ artifact_path = "{}/{}/{}".format(
+ base_folder["folder"],
+ base_folder["pkg-dir"],
+ "exporter-endpoint",
+ )
+ ee_id = None
+ ee_config_descriptor = exporter_config
+ rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+ logging_text,
+ nsr_id,
+ vnfr_id,
+ vdu_id=db_vnfr["vdur"][-1]["vdu-id-ref"],
+ vdu_index=db_vnfr["vdur"][-1]["count-index"],
+ user=None,
+ pub_key=None,
+ )
+ self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
+ self.logger.debug("Artifact_path:{}".format(artifact_path))
+ vdu_id_for_prom = None
+ vdu_index_for_prom = None
+ for x in get_iterable(db_vnfr, "vdur"):
+ vdu_id_for_prom = x.get("vdu-id-ref")
+ vdu_index_for_prom = x.get("count-index")
+ vnfr_id = vnfr_id + vdu_id + str(vdu_index)
+ vnfr_id = vnfr_id.replace("_", "")
+ prometheus_jobs = await self.extract_prometheus_scrape_jobs(
+ ee_id=ee_id,
+ artifact_path=artifact_path,
+ ee_config_descriptor=ee_config_descriptor,
+ vnfr_id=vnfr_id,
+ nsr_id=nsr_id,
+ target_ip=rw_mgmt_ip,
+ element_type="VDU",
+ vdu_id=vdu_id_for_prom,
+ vdu_index=vdu_index_for_prom,
+ )
+
+ self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
+ if prometheus_jobs:
+ db_nsr_update[
+ "_admin.deployed.prometheus_jobs"
+ ] = prometheus_jobs
+ self.update_db_2(
+ "nsrs",
+ nsr_id,
+ db_nsr_update,
+ )
+ for job in prometheus_jobs:
+ self.db.set_one(
+ "prometheus_jobs",
+ {"job_name": ""},
+ job,
+ upsert=True,
+ fail_on_empty=False,
+ )
db_nsr_update[
"detailed-status"
] = "" # "scaled {} {}".format(scaling_group, scaling_type)
error_description_nslcmop = None
nslcmop_operation_state = "COMPLETED"
db_nslcmop_update["detailed-status"] = "Done"
+ if scaling_type == "SCALE_IN" and prom_job_name is not None:
+ self.db.del_one(
+ "prometheus_jobs",
+ {"job_name": prom_job_name},
+ fail_on_empty=False,
+ )
self._write_op_status(
op_id=nslcmop_id,