# Sentinel status codes for sub-operation lookup/handling.
# NOTE(review): exact semantics (not found / create new / skip) are implied by
# the names; the code that interprets them is outside this fragment — confirm.
SUBOPERATION_STATUS_NOT_FOUND = -1
SUBOPERATION_STATUS_NEW = -2
SUBOPERATION_STATUS_SKIP = -3
# Name of the Kubernetes TLS secret used for helm-based execution environments
# (passed as secret_name/certificate_name to create/delete_tls_certificate).
EE_TLS_NAME = "ee-tls"
# Human-readable label for the VCA deployment task.
task_name_deploy_vca = "Deploying VCA"
# Mapping from IM relational operation codes (GE, LE, ...) to the comparison
# operators used in Prometheus alert expressions.
rel_operation_types = {
    "GE": ">=",
    "LE": "<=",
    "GT": ">",
    "LT": "<",
    "EQ": "==",
    "NE": "!=",
}
def __init__(self, msg, lcm_tasks, config: LcmCfg):
"""
)
vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
if not vdur:
- return
+ continue
for a_index, a_vld in enumerate(target["ns"]["vld"]):
target_vld = find_in_list(
get_iterable(vdur, "interfaces"),
image["vim_info"] = {}
for flavor in target["flavor"]:
flavor["vim_info"] = {}
+ if db_nsr.get("shared-volumes"):
+ target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
+ for shared_volumes in target["shared-volumes"]:
+ shared_volumes["vim_info"] = {}
if db_nsr.get("affinity-or-anti-affinity-group"):
target["affinity-or-anti-affinity-group"] = deepcopy(
db_nsr["affinity-or-anti-affinity-group"]
if target_vim not in ns_ags["vim_info"]:
ns_ags["vim_info"][target_vim] = {}
+ # shared-volumes
+ if vdur.get("shared-volumes-id"):
+ for sv_id in vdur["shared-volumes-id"]:
+ ns_sv = find_in_list(
+ target["shared-volumes"], lambda sv: sv_id in sv["id"]
+ )
+ if ns_sv:
+ ns_sv["vim_info"][target_vim] = {}
+
vdur["vim_info"] = {target_vim: {}}
# instantiation parameters
if vnf_params:
ee_id, credentials = await self.vca_map[
vca_type
].create_execution_environment(
- namespace=namespace,
+ namespace=nsr_id,
reuse_ee_id=ee_id,
db_dict=db_dict,
config=osm_config,
df = vnfd.get("df", [{}])[0]
# Checking for auto-scaling configuration
if "scaling-aspect" in df:
- rel_operation_types = {
- "GE": ">=",
- "LE": "<=",
- "GT": ">",
- "LT": "<",
- "EQ": "==",
- "NE": "!=",
- }
scaling_aspects = df["scaling-aspect"]
all_vnfd_monitoring_params = {}
for ivld in vnfd.get("int-virtual-link-desc", ()):
operation = scaling_criteria[
"scale-in-relational-operation"
]
- rel_operator = rel_operation_types.get(operation, "<=")
+ rel_operator = self.rel_operation_types.get(
+ operation, "<="
+ )
metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
labels = {
operation = scaling_criteria[
"scale-out-relational-operation"
]
- rel_operator = rel_operation_types.get(operation, "<=")
+ rel_operator = self.rel_operation_types.get(
+ operation, "<="
+ )
metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
labels = {
alerts.append(alert)
return alerts
def _gather_vnfr_alarm_alerts(self, vnfr, vnfd):
    """Build Prometheus alert definitions for every VDU alarm of this VNFR.

    For each vdur in the VNF record, the matching VDU descriptor is located in
    the VNFD; every "alarm" entry that references a valid VDU
    monitoring-parameter with a "performance-metric" yields one alert dict.
    Invalid alarm references are logged and skipped.

    :param vnfr: VNF record; reads "nsr-id-ref", "member-vnf-index-ref" and
        the "vdur" list.
    :param vnfd: VNF descriptor; each vdur is matched against vnfd["vdu"] by
        id, and the VDU "alarm" / "monitoring-parameter" entries drive the
        alerts created.
    :return: list of alert dicts with keys uuid, name, metric, tags,
        alarm_status, action_type, action and prometheus_config (may be empty).
    """
    alerts = []
    nsr_id = vnfr["nsr-id-ref"]
    vnf_member_index = vnfr["member-vnf-index-ref"]

    # Checking for VNF alarm configuration
    for vdur in vnfr["vdur"]:
        vdu_id = vdur["vdu-id-ref"]
        # Descriptor of this VDU: first vnfd["vdu"] entry with matching id
        vdu = next(filter(lambda vdu: vdu["id"] == vdu_id, vnfd["vdu"]))
        if "alarm" in vdu:
            # Get VDU monitoring params, since alerts are based on them
            vdu_monitoring_params = {}
            for mp in vdu.get("monitoring-parameter", []):
                vdu_monitoring_params[mp.get("id")] = mp
            if not vdu_monitoring_params:
                self.logger.error(
                    "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
                )
                continue
            # Get alarms in the VDU
            alarm_descriptors = vdu["alarm"]
            # Create VDU alarms for each alarm in the VDU
            for alarm_descriptor in alarm_descriptors:
                # Check that the VDU alarm refers to a proper monitoring param
                alarm_monitoring_param = alarm_descriptor.get(
                    "vnf-monitoring-param-ref", ""
                )
                vdu_specific_monitoring_param = vdu_monitoring_params.get(
                    alarm_monitoring_param, {}
                )
                if not vdu_specific_monitoring_param:
                    self.logger.error(
                        "VDU alarm refers to a VDU monitoring param not present in the VDU"
                    )
                    continue
                metric_name = vdu_specific_monitoring_param.get(
                    "performance-metric"
                )
                if not metric_name:
                    self.logger.error(
                        "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
                    )
                    continue
                # Set params of the alarm to be created in Prometheus
                metric_name = f"osm_{metric_name}"
                metric_threshold = alarm_descriptor.get("value")
                uuid = str(uuid4())
                alert_name = f"vdu_alarm_{uuid}"
                operation = alarm_descriptor["operation"]
                # Map the IM relational code (GE/LE/...) to a PromQL
                # comparison operator; unknown codes fall back to "<="
                rel_operator = self.rel_operation_types.get(operation, "<=")
                metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
                expression = f"{metric_selector} {rel_operator} {metric_threshold}"
                labels = {
                    "ns_id": nsr_id,
                    "vnf_member_index": vnf_member_index,
                    "vdu_id": vdu_id,
                    # Prometheus template placeholder, resolved at alert time
                    "vdu_name": "{{ $labels.vdu_name }}",
                }
                prom_cfg = {
                    "alert": alert_name,
                    "expr": expression,
                    "for": "1m",  # default value. Ideally, this should be related to an IM param, but there is not such param
                    "labels": labels,
                }
                alarm_action = dict()
                # Keep only the action types actually present in the descriptor
                for action_type in ["ok", "insufficient-data", "alarm"]:
                    if (
                        "actions" in alarm_descriptor
                        and action_type in alarm_descriptor["actions"]
                    ):
                        alarm_action[action_type] = alarm_descriptor["actions"][
                            action_type
                        ]
                alert = {
                    "uuid": uuid,
                    "name": alert_name,
                    "metric": metric_name,
                    "tags": {
                        "ns_id": nsr_id,
                        "vnf_member_index": vnf_member_index,
                        "vdu_id": vdu_id,
                    },
                    "alarm_status": "ok",
                    "action_type": "vdu_alarm",
                    "action": alarm_action,
                    "prometheus_config": prom_cfg,
                }
                alerts.append(alert)
    return alerts
+
def update_nsrs_with_pla_result(self, params):
try:
nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
# create namespace and certificate if any helm based EE is present in the NS
if check_helm_ee_in_ns(db_vnfds):
- # TODO: create EE namespace
+ await self.vca_map["helm-v3"].setup_ns_namespace(
+ name=nsr_id,
+ )
# create TLS certificates
await self.vca_map["helm-v3"].create_tls_certificate(
- secret_name="ee-tls-{}".format(nsr_id),
+ secret_name=self.EE_TLS_NAME,
dns_prefix="*",
nsr_id=nsr_id,
usage="server auth",
+ namespace=nsr_id,
)
nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
stage=stage,
)
+ # Check if each vnf has exporter for metric collection if so update prometheus job records
+ if "exporters-endpoints" in vnfd.get("df")[0]:
+ exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
+ self.logger.debug("exporter config :{}".format(exporter_config))
+ artifact_path = "{}/{}/{}".format(
+ base_folder["folder"],
+ base_folder["pkg-dir"],
+ "exporter-endpoint",
+ )
+ ee_id = None
+ ee_config_descriptor = exporter_config
+ vnfr_id = db_vnfr["id"]
+ rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+ logging_text,
+ nsr_id,
+ vnfr_id,
+ vdu_id=None,
+ vdu_index=None,
+ user=None,
+ pub_key=None,
+ )
+ self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
+ self.logger.debug("Artifact_path:{}".format(artifact_path))
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+ vdu_id_for_prom = None
+ vdu_index_for_prom = None
+ for x in get_iterable(db_vnfr, "vdur"):
+ vdu_id_for_prom = x.get("vdu-id-ref")
+ vdu_index_for_prom = x.get("count-index")
+ prometheus_jobs = await self.extract_prometheus_scrape_jobs(
+ ee_id=ee_id,
+ artifact_path=artifact_path,
+ ee_config_descriptor=ee_config_descriptor,
+ vnfr_id=vnfr_id,
+ nsr_id=nsr_id,
+ target_ip=rw_mgmt_ip,
+ element_type="VDU",
+ vdu_id=vdu_id_for_prom,
+ vdu_index=vdu_index_for_prom,
+ )
+
+ self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
+ if prometheus_jobs:
+ db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
+ self.update_db_2(
+ "nsrs",
+ nsr_id,
+ db_nsr_update,
+ )
+
+ for job in prometheus_jobs:
+ self.db.set_one(
+ "prometheus_jobs",
+ {"job_name": job["job_name"]},
+ job,
+ upsert=True,
+ fail_on_empty=False,
+ )
+
# Check if this NS has a charm configuration
descriptor_config = nsd.get("ns-configuration")
if descriptor_config and descriptor_config.get("juju"):
self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
self.db.create("alerts", alert)
+ alarm_alerts = self._gather_vnfr_alarm_alerts(vnfr, vnfd)
+ for alert in alarm_alerts:
+ self.logger.info(f"Storing VNF alarm alert in MongoDB: {alert}")
+ self.db.create("alerts", alert)
if db_nsr:
self._write_ns_status(
nsr_id=nsr_id,
# TODO vdu_index_count
for vca in vca_deployed_list:
if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
- return vca["ee_id"]
+ return vca.get("ee_id")
async def destroy_N2VC(
self,
# Delete Namespace and Certificates if necessary
if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
await self.vca_map["helm-v3"].delete_tls_certificate(
- certificate_name=db_nslcmop["nsInstanceId"],
+ namespace=db_nslcmop["nsInstanceId"],
+ certificate_name=self.EE_TLS_NAME,
+ )
+ await self.vca_map["helm-v3"].delete_namespace(
+ namespace=db_nslcmop["nsInstanceId"],
)
- # TODO: Delete namespace
# Delete from k8scluster
stage[1] = "Deleting KDUs."
)
if not job_file:
return
+ self.logger.debug("Artifact path{}".format(artifact_path))
+ self.logger.debug("job file{}".format(job_file))
with self.fs.file_open((artifact_path, job_file), "r") as f:
job_data = f.read()
f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
)
- # TODO get_service
- _, _, service = ee_id.partition(".") # remove prefix "namespace."
- host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
- host_port = "80"
- vnfr_id = vnfr_id.replace("-", "")
- variables = {
- "JOB_NAME": vnfr_id,
- "TARGET_IP": target_ip,
- "EXPORTER_POD_IP": host_name,
- "EXPORTER_POD_PORT": host_port,
- "NSR_ID": nsr_id,
- "VNF_MEMBER_INDEX": vnf_member_index,
- "VDUR_NAME": vdur_name,
- "KDUR_NAME": kdur_name,
- "ELEMENT_TYPE": element_type,
- }
+ if ee_id is not None:
+ _, namespace, helm_id = get_ee_id_parts(
+ ee_id
+ ) # get namespace and EE gRPC service name
+ host_name = f'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
+ host_port = "80"
+ vnfr_id = vnfr_id.replace("-", "")
+ variables = {
+ "JOB_NAME": vnfr_id,
+ "TARGET_IP": target_ip,
+ "EXPORTER_POD_IP": host_name,
+ "EXPORTER_POD_PORT": host_port,
+ "NSR_ID": nsr_id,
+ "VNF_MEMBER_INDEX": vnf_member_index,
+ "VDUR_NAME": vdur_name,
+ "KDUR_NAME": kdur_name,
+ "ELEMENT_TYPE": element_type,
+ }
+ else:
+ metric_path = ee_config_descriptor["metric-path"]
+ target_port = ee_config_descriptor["metric-port"]
+ vnfr_id = vnfr_id.replace("-", "")
+ variables = {
+ "JOB_NAME": vnfr_id,
+ "TARGET_IP": target_ip,
+ "TARGET_PORT": target_port,
+ "METRIC_PATH": metric_path,
+ }
+
job_list = parse_job(job_data, variables)
# ensure job_name is using the vnfr_id. Adding the metadata nsr_id
for job in job_list: