import logging
import logging.handlers
import traceback
+import ipaddress
import json
from jinja2 import (
Environment,
if not isinstance(ip_mac, str):
return ip_mac
try:
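+ # a dual-stack entry has the form "<ipv4>;<ipv6>": increment each address
+ # independently, e.g. "10.0.0.1;2001:db8::1" -> ["10.0.0.2", "2001:db8::2"]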
+ next_ipv6 = None
+ next_ipv4 = None
+ dual_ip = ip_mac.split(";")
+ if len(dual_ip) == 2:
+ for ip in dual_ip:
+ if ipaddress.ip_address(ip).version == 6:
+ ipv6 = ipaddress.IPv6Address(ip)
+ next_ipv6 = str(ipaddress.IPv6Address(int(ipv6) + 1))
+ elif ipaddress.ip_address(ip).version == 4:
+ ipv4 = ipaddress.IPv4Address(ip)
+ next_ipv4 = str(ipaddress.IPv4Address(int(ipv4) + 1))
+ return [next_ipv4, next_ipv6]
# try with IPv4: look for the last dot
i = ip_mac.rfind(".")
if i > 0:
)
)
ee_descriptor_id = ee_item.get("id")
- if ee_item.get("juju"):
- vca_name = ee_item["juju"].get("charm")
- if get_charm_name:
- charm_name = self.find_charm_name(db_nsr, str(vca_name))
- vca_type = (
- "lxc_proxy_charm"
- if ee_item["juju"].get("charm") is not None
- else "native_charm"
- )
- if ee_item["juju"].get("cloud") == "k8s":
- vca_type = "k8s_proxy_charm"
- elif ee_item["juju"].get("proxy") is False:
- vca_type = "native_charm"
- elif ee_item.get("helm-chart"):
- vca_name = ee_item["helm-chart"]
- vca_type = "helm-v3"
- else:
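+ # resolve the VCA name/type (proxy/native charm, k8s proxy charm or
+ # helm-v3 chart) from the execution environment item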
+ vca_name, charm_name, vca_type = self.get_vca_info(
+ ee_item, db_nsr, get_charm_name
+ )
+ if not vca_type:
self.logger.debug(
- logging_text + "skipping non juju neither charm configuration"
+ logging_text + "skipping: no juju/charm/helm configuration"
)
continue
if nsr_deployed.get("VCA"):
stage[1] = "Deleting all execution environments."
self.logger.debug(logging_text + stage[1])
- vca_id = self.get_vca_id({}, db_nsr)
- task_delete_ee = asyncio.ensure_future(
- asyncio.wait_for(
- self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
- timeout=self.timeout.charm_delete,
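+ # check whether any helm-v3 execution environments are deployed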
+ helm_vca_list = get_deployed_vca(db_nsr, {"type": "helm-v3"})
+ if helm_vca_list:
+ # Delete Namespace and Certificates
+ await self.vca_map["helm-v3"].delete_tls_certificate(
+ namespace=db_nslcmop["nsInstanceId"],
+ certificate_name=self.EE_TLS_NAME,
)
- )
- # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
- tasks_dict_info[task_delete_ee] = "Terminating all VCA"
-
- # Delete Namespace and Certificates if necessary
- if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
- await self.vca_map["helm-v3"].delete_tls_certificate(
- namespace=db_nslcmop["nsInstanceId"],
- certificate_name=self.EE_TLS_NAME,
- )
- await self.vca_map["helm-v3"].delete_namespace(
- namespace=db_nslcmop["nsInstanceId"],
- )
+ await self.vca_map["helm-v3"].delete_namespace(
+ namespace=db_nslcmop["nsInstanceId"],
+ )
+ else:
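+ # no helm EEs: terminate every VCA execution environment through N2VC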
+ vca_id = self.get_vca_id({}, db_nsr)
+ task_delete_ee = asyncio.ensure_future(
+ asyncio.wait_for(
+ self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
+ timeout=self.timeout.charm_delete,
+ )
+ )
+ tasks_dict_info[task_delete_ee] = "Terminating all VCA"
# Delete from k8scluster
stage[1] = "Deleting KDUs."
)
)
+ step = "Checking whether the descriptor has SFC"
+ if db_nsr.get("nsd", {}).get("vnffgd"):
+ raise LcmException(
+ "NS update is not allowed for NS with SFC"
+ )
+
# There is no change in the charm package, so redeploy the VNF
# based on the new descriptor
step = "Redeploying VNF"
old_operational_status = ""
old_config_status = ""
nsi_id = None
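+ # name of the Prometheus scrape job to delete after a SCALE_IN (set in the scaling loop)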
+ prom_job_name = None
try:
# wait for any previous tasks in process
step = "Waiting for previous operations to terminate"
old_operational_status = db_nsr["operational-status"]
old_config_status = db_nsr["config-status"]
+ step = "Checking whether the descriptor has SFC"
+ if db_nsr.get("nsd", {}).get("vnffgd"):
+ raise LcmException("Scaling is not allowed for NS with SFC")
+
step = "Parsing scaling parameters"
db_nsr_update["operational-status"] = "scaling"
self.update_db_2("nsrs", nsr_id, db_nsr_update)
nsr_id,
{
"_admin.scaling-group": [
- {"name": scaling_group, "nb-scale-op": 0}
+ {
+ "name": scaling_group,
+ "vnf_index": vnf_index,
+ "nb-scale-op": 0,
+ }
]
},
)
for admin_scale_index, admin_scale_info in enumerate(
db_nsr["_admin"]["scaling-group"]
):
- if admin_scale_info["name"] == scaling_group:
+ if (
+ admin_scale_info["name"] == scaling_group
+ and admin_scale_info["vnf_index"] == vnf_index
+ ):
nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
break
else: # not found: add a new entry one position after the last element
db_nsr_update[
"_admin.scaling-group.{}.name".format(admin_scale_index)
] = scaling_group
+ db_nsr_update[
+ "_admin.scaling-group.{}.vnf_index".format(admin_scale_index)
+ ] = vnf_index
vca_scaling_info = []
scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
vdud = get_vdu(db_vnfd, vdu_delta["id"])
# vdu_index also provides the number of instances of the targeted vdu
vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
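+ # derive the name of the Prometheus job associated with the
+ # VDU instance being removed ('_' and '-' are stripped)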
+ if vdu_index <= len(db_vnfr["vdur"]):
+ vdu_name_id = db_vnfr["vdur"][vdu_index - 1]["vdu-name"]
+ prom_job_name = (
+ db_vnfr["_id"] + vdu_name_id + str(vdu_index - 1)
+ )
+ prom_job_name = prom_job_name.replace("_", "")
+ prom_job_name = prom_job_name.replace("-", "")
+ else:
+ prom_job_name = None
cloud_init_text = self._get_vdu_cloud_init_content(
vdud, db_vnfd
)
db_nsr_update["config-status"] = old_config_status
scale_process = None
# POST-SCALE END
+ # if the VNF exposes exporter endpoints for metric collection, update the Prometheus job records
+ if scaling_type == "SCALE_OUT":
+ if "exporters-endpoints" in db_vnfd.get("df")[0]:
+ vnfr_id = db_vnfr["id"]
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+ exporter_config = db_vnfd.get("df")[0].get("exporters-endpoints")
+ self.logger.debug("exporter config :{}".format(exporter_config))
+ artifact_path = "{}/{}/{}".format(
+ base_folder["folder"],
+ base_folder["pkg-dir"],
+ "exporter-endpoint",
+ )
+ ee_id = None
+ ee_config_descriptor = exporter_config
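+ # wait for the newly added VDU (the last vdur entry) to come up
+ # and retrieve its management IP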
+ rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+ logging_text,
+ nsr_id,
+ vnfr_id,
+ vdu_id=db_vnfr["vdur"][-1]["vdu-id-ref"],
+ vdu_index=db_vnfr["vdur"][-1]["count-index"],
+ user=None,
+ pub_key=None,
+ )
+ self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
+ self.logger.debug("Artifact_path:{}".format(artifact_path))
+ vdu_id_for_prom = None
+ vdu_index_for_prom = None
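+ # the scaled-out VDU is the last vdur entry; keep its id and count-index for the scrape job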
+ for x in get_iterable(db_vnfr, "vdur"):
+ vdu_id_for_prom = x.get("vdu-id-ref")
+ vdu_index_for_prom = x.get("count-index")
+ vnfr_id = vnfr_id + vdu_id + str(vdu_index)
+ vnfr_id = vnfr_id.replace("_", "")
+ prometheus_jobs = await self.extract_prometheus_scrape_jobs(
+ ee_id=ee_id,
+ artifact_path=artifact_path,
+ ee_config_descriptor=ee_config_descriptor,
+ vnfr_id=vnfr_id,
+ nsr_id=nsr_id,
+ target_ip=rw_mgmt_ip,
+ element_type="VDU",
+ vdu_id=vdu_id_for_prom,
+ vdu_index=vdu_index_for_prom,
+ )
+ self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
+ if prometheus_jobs:
+ db_nsr_update[
+ "_admin.deployed.prometheus_jobs"
+ ] = prometheus_jobs
+ self.update_db_2(
+ "nsrs",
+ nsr_id,
+ db_nsr_update,
+ )
+
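+ # upsert each scrape job keyed by its job_name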
+ for job in prometheus_jobs:
+ self.db.set_one(
+ "prometheus_jobs",
+ {"job_name": ""},
+ job,
+ upsert=True,
+ fail_on_empty=False,
+ )
db_nsr_update[
"detailed-status"
] = "" # "scaled {} {}".format(scaling_group, scaling_type)
error_description_nslcmop = None
nslcmop_operation_state = "COMPLETED"
db_nslcmop_update["detailed-status"] = "Done"
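+ # drop the scrape job of the VDU instance removed by the SCALE_IN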
+ if scaling_type == "SCALE_IN" and prom_job_name:
+ self.db.del_one(
+ "prometheus_jobs",
+ {"job_name": prom_job_name},
+ fail_on_empty=False,
+ )
self._write_op_status(
op_id=nslcmop_id,
old_config_status = db_nsr["config-status"]
db_nsr_update = {
+ "operational-status": "healing",
"_admin.deployed.RO.operational-status": "healing",
}
self.update_db_2("nsrs", nsr_id, db_nsr_update)
task_instantiation_info=tasks_dict_info,
stage=stage,
)
-
except (
ROclient.ROClientException,
DbException,
)
)
ee_descriptor_id = ee_item.get("id")
- if ee_item.get("juju"):
- vca_name = ee_item["juju"].get("charm")
- if get_charm_name:
- charm_name = self.find_charm_name(db_nsr, str(vca_name))
- vca_type = (
- "lxc_proxy_charm"
- if ee_item["juju"].get("charm") is not None
- else "native_charm"
- )
- if ee_item["juju"].get("cloud") == "k8s":
- vca_type = "k8s_proxy_charm"
- elif ee_item["juju"].get("proxy") is False:
- vca_type = "native_charm"
- elif ee_item.get("helm-chart"):
- vca_name = ee_item["helm-chart"]
- vca_type = "helm-v3"
- else:
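+ # resolve the VCA name/type (proxy/native charm, k8s proxy charm or
+ # helm-v3 chart) from the execution environment item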
+ vca_name, charm_name, vca_type = self.get_vca_info(
+ ee_item, db_nsr, get_charm_name
+ )
+ if not vca_type:
self.logger.debug(
- logging_text + "skipping non juju neither charm configuration"
+ logging_text + "skipping: no juju/charm/helm configuration"
)
continue