from osm_lcm.ng_ro import NgRoClient, NgRoException
from osm_lcm.lcm_utils import (
LcmException,
- LcmExceptionNoMgmtIP,
LcmBase,
deep_get,
get_iterable,
from osm_lcm.data_utils.dict_utils import parse_yaml_strings
from osm_lcm.data_utils.database.vim_account import VimAccountDB
from n2vc.definitions import RelationEndpoint
-from n2vc.k8s_helm_conn import K8sHelmConnector
from n2vc.k8s_helm3_conn import K8sHelm3Connector
from n2vc.k8s_juju_conn import K8sJujuConnector
from time import time
from uuid import uuid4
-from random import randint
+from random import SystemRandom
__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
SUBOPERATION_STATUS_NOT_FOUND = -1
SUBOPERATION_STATUS_NEW = -2
SUBOPERATION_STATUS_SKIP = -3
+ EE_TLS_NAME = "ee-tls"
task_name_deploy_vca = "Deploying VCA"
-
- def __init__(self, msg, lcm_tasks, config: LcmCfg, loop):
+ rel_operation_types = {
+ "GE": ">=",
+ "LE": "<=",
+ "GT": ">",
+ "LT": "<",
+ "EQ": "==",
+ "NE": "!=",
+ }
+
+ def __init__(self, msg, lcm_tasks, config: LcmCfg):
"""
Init, Connect to database, filesystem storage, and messaging
:param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
self.db = Database().instance.db
self.fs = Filesystem().instance.fs
- self.loop = loop
self.lcm_tasks = lcm_tasks
self.timeout = config.timeout
self.ro_config = config.RO
# create N2VC connector
self.n2vc = N2VCJujuConnector(
log=self.logger,
- loop=self.loop,
on_update_db=self._on_update_n2vc_db,
fs=self.fs,
db=self.db,
self.conn_helm_ee = LCMHelmConn(
log=self.logger,
- loop=self.loop,
vca_config=self.vca_config,
on_update_db=self._on_update_n2vc_db,
)
- self.k8sclusterhelm2 = K8sHelmConnector(
- kubectl_command=self.vca_config.kubectlpath,
- helm_command=self.vca_config.helmpath,
- log=self.logger,
- on_update_db=None,
- fs=self.fs,
- db=self.db,
- )
-
self.k8sclusterhelm3 = K8sHelm3Connector(
kubectl_command=self.vca_config.kubectlpath,
helm_command=self.vca_config.helm3path,
kubectl_command=self.vca_config.kubectlpath,
juju_command=self.vca_config.jujupath,
log=self.logger,
- loop=self.loop,
on_update_db=self._on_update_k8s_db,
fs=self.fs,
db=self.db,
)
self.k8scluster_map = {
- "helm-chart": self.k8sclusterhelm2,
"helm-chart-v3": self.k8sclusterhelm3,
"chart": self.k8sclusterhelm3,
"juju-bundle": self.k8sclusterjuju,
}
# create RO client
- self.RO = NgRoClient(self.loop, **self.ro_config.to_dict())
+ self.RO = NgRoClient(**self.ro_config.to_dict())
self.op_status_map = {
"instantiation": self.RO.status,
pass
return None
def _on_update_ro_db(self, nsrs_id, ro_descriptor):
    """Persist the RO deployment descriptor of an NS record.

    Stores the raw RO descriptor under the ``deploymentStatus`` field of
    the "nsrs" collection entry identified by *nsrs_id*. Database
    failures are logged as warnings and swallowed so that a status-write
    failure never interrupts the calling workflow.

    :param nsrs_id: _id of the NS record ("nsrs" collection) to update
    :param ro_descriptor: deployment descriptor as received from RO
    """
    try:
        # TODO filter RO descriptor fields...

        # write to database
        db_dict = dict()
        # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
        db_dict["deploymentStatus"] = ro_descriptor
        self.update_db_2("nsrs", nsrs_id, db_dict)

    except Exception as e:
        # best-effort update: log and continue, never propagate
        self.logger.warn(
            "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
        )
async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
# remove last dot from path (if exists)
if path.endswith("."):
additional_params = vdur.get("additionalParams")
return parse_yaml_strings(additional_params)
def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
    """
    Creates a new vnfd descriptor for RO, based on the input OSM IM vnfd.

    Strips the keys RO does not consume (database metadata, monitoring,
    scaling, kdu/k8s information) and removes per-VDU cloud-init data.
    The input descriptor is not modified; a deep copy is edited and
    returned.

    :param vnfd: input vnfd
    :param new_id: overrides vnf id if provided
    :param additionalParams: Instantiation params for VNFs provided (unused here — TODO confirm with callers)
    :param nsrId: Id of the NSR (unused here — TODO confirm with callers)
    :return: copy of vnfd suitable for RO
    """
    vnfd_RO = deepcopy(vnfd)
    # remove unused by RO configuration, monitoring, scaling and internal keys
    vnfd_RO.pop("_id", None)
    vnfd_RO.pop("_admin", None)
    vnfd_RO.pop("monitoring-param", None)
    vnfd_RO.pop("scaling-group-descriptor", None)
    vnfd_RO.pop("kdu", None)
    vnfd_RO.pop("k8s-cluster", None)
    if new_id:
        vnfd_RO["id"] = new_id

    # drop cloud-init templates: RO must not receive them raw
    # (rendering with instantiation variables happens elsewhere)
    for vdu in get_iterable(vnfd_RO, "vdu"):
        vdu.pop("cloud-init-file", None)
        vdu.pop("cloud-init", None)
    return vnfd_RO
-
@staticmethod
def ip_profile_2_RO(ip_profile):
RO_ip_profile = deepcopy(ip_profile)
RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
return RO_ip_profile
def _get_ro_vim_id_for_vim_account(self, vim_account):
    """Return the RO identifier under which a VIM account is registered.

    :param vim_account: _id of the VIM account ("vim_accounts" collection)
    :return: RO vim id (taken from _admin.deployed.RO)
    :raises LcmException: if the VIM account is not in ENABLED state
    """
    db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
    if db_vim["_admin"]["operationalState"] != "ENABLED":
        raise LcmException(
            "VIM={} is not available. operationalState={}".format(
                vim_account, db_vim["_admin"]["operationalState"]
            )
        )
    RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
    return RO_vim_id
-
def get_ro_wim_id_for_wim_account(self, wim_account):
    """Return the RO identifier of a WIM account.

    When *wim_account* is a string it is treated as the _id of a
    "wim_accounts" database entry and the RO-side account id is looked
    up; any other value is assumed to already be an RO identifier and
    is returned unchanged.

    :raises LcmException: if the WIM account is not in ENABLED state
    """
    if not isinstance(wim_account, str):
        # already resolved to an RO-side identifier; nothing to do
        return wim_account
    db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
    operational_state = db_wim["_admin"]["operationalState"]
    if operational_state != "ENABLED":
        raise LcmException(
            "WIM={} is not available. operationalState={}".format(
                wim_account, operational_state
            )
        )
    return db_wim["_admin"]["deployed"]["RO-account"]
-
def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
db_vdu_push_list = []
template_vdur = []
except DbException as e:
self.logger.error("Cannot update vnf. {}".format(e))
def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
    """
    Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
    :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors

    Every for/else below relies on the Python for-else idiom: the else
    branch runs only when the loop completed without a break, i.e. when
    the expected element was NOT found in the RO descriptor.
    """
    for vnf_index, db_vnfr in db_vnfrs.items():
        for vnf_RO in nsr_desc_RO["vnfs"]:
            if vnf_RO["member_vnf_index"] != vnf_index:
                continue
            vnfr_update = {}
            if vnf_RO.get("ip_address"):
                # RO may report several addresses separated by ';': keep the first
                db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                    "ip_address"
                ].split(";")[0]
            elif not db_vnfr.get("ip-address"):
                if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                    raise LcmExceptionNoMgmtIP(
                        "ns member_vnf_index '{}' has no IP address".format(
                            vnf_index
                        )
                    )

        for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
            vdur_RO_count_index = 0
            if vdur.get("pdu-type"):
                # PDUs are not deployed by RO, so there is no RO info to copy
                continue
            for vdur_RO in get_iterable(vnf_RO, "vms"):
                if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                    continue
                if vdur["count-index"] != vdur_RO_count_index:
                    # same vdu id but different replica: advance and keep searching
                    vdur_RO_count_index += 1
                    continue
                vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                if vdur_RO.get("ip_address"):
                    # keep only the first of possibly several ';'-separated addresses
                    vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                else:
                    vdur["ip-address"] = None
                vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                vdur["name"] = vdur_RO.get("vim_name")
                vdur["status"] = vdur_RO.get("status")
                vdur["status-detailed"] = vdur_RO.get("error_msg")
                # copy per-interface addressing, matched by internal_name
                for ifacer in get_iterable(vdur, "interfaces"):
                    for interface_RO in get_iterable(vdur_RO, "interfaces"):
                        if ifacer["name"] == interface_RO.get("internal_name"):
                            ifacer["ip-address"] = interface_RO.get(
                                "ip_address"
                            )
                            ifacer["mac-address"] = interface_RO.get(
                                "mac_address"
                            )
                            break
                    else:
                        # no RO interface matched this vnfr interface
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                            "from VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                            )
                        )
                vnfr_update["vdur.{}".format(vdu_index)] = vdur
                break
            else:
                # no RO VM matched this vdur (id + count-index)
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                    "VIM info".format(
                        vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                    )
                )

        for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
            for net_RO in get_iterable(nsr_desc_RO, "nets"):
                if vld["id"] != net_RO.get("vnf_net_osm_id"):
                    continue
                vld["vim-id"] = net_RO.get("vim_net_id")
                vld["name"] = net_RO.get("vim_name")
                vld["status"] = net_RO.get("status")
                vld["status-detailed"] = net_RO.get("error_msg")
                vnfr_update["vld.{}".format(vld_index)] = vld
                break
            else:
                # no RO net matched this vnfr vld
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                        vnf_index, vld["id"]
                    )
                )

        self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
        break

    else:
        # no RO vnf entry matched this member_vnf_index
        raise LcmException(
            "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                vnf_index
            )
        )
-
def _get_ns_config_info(self, nsr_id):
"""
Generates a mapping between vnf,vdu elements and the N2VC id
)
vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
if not vdur:
- return
+ continue
for a_index, a_vld in enumerate(target["ns"]["vld"]):
target_vld = find_in_list(
get_iterable(vdur, "interfaces"),
image["vim_info"] = {}
for flavor in target["flavor"]:
flavor["vim_info"] = {}
+ if db_nsr.get("shared-volumes"):
+ target["shared-volumes"] = deepcopy(db_nsr["shared-volumes"])
+ for shared_volumes in target["shared-volumes"]:
+ shared_volumes["vim_info"] = {}
if db_nsr.get("affinity-or-anti-affinity-group"):
target["affinity-or-anti-affinity-group"] = deepcopy(
db_nsr["affinity-or-anti-affinity-group"]
if target_vim not in ns_ags["vim_info"]:
ns_ags["vim_info"][target_vim] = {}
+ # shared-volumes
+ if vdur.get("shared-volumes-id"):
+ for sv_id in vdur["shared-volumes-id"]:
+ ns_sv = find_in_list(
+ target["shared-volumes"], lambda sv: sv_id in sv["id"]
+ )
+ if ns_sv:
+ ns_sv["vim_info"][target_vim] = {}
+
vdur["vim_info"] = {target_vim: {}}
# instantiation parameters
if vnf_params:
db_nsr_update["detailed-status"] = " ".join(stage)
self.update_db_2("nsrs", nsr_id, db_nsr_update)
self._write_op_status(nslcmop_id, stage)
- await asyncio.sleep(15, loop=self.loop)
+ await asyncio.sleep(15)
else: # timeout_ns_deploy
raise NgRoException("Timeout waiting ns to deploy")
"target KDU={} is in error state".format(kdu_name)
)
- await asyncio.sleep(10, loop=self.loop)
+ await asyncio.sleep(10)
nb_tries += 1
raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
"Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
)
- await asyncio.sleep(10, loop=self.loop)
+ await asyncio.sleep(10)
# get ip address
if not target_vdu_id:
vca_id = self.get_vca_id(db_vnfr, db_nsr)
# create or register execution environment in VCA
- if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
+ if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
self._write_configuration_status(
nsr_id=nsr_id,
vca_index=vca_index,
db_dict=db_dict,
vca_id=vca_id,
)
- elif vca_type == "helm" or vca_type == "helm-v3":
+ elif vca_type == "helm-v3":
ee_id, credentials = await self.vca_map[
vca_type
].create_execution_environment(
- namespace=namespace,
+ namespace=nsr_id,
reuse_ee_id=ee_id,
db_dict=db_dict,
config=osm_config,
# if SSH access is required, then get execution environment SSH public
# if native charm we have waited already to VM be UP
- if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
+ if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
pub_key = None
user = None
# self.logger.debug("get ssh key block")
# TODO register in database that primitive is done
# STEP 7 Configure metrics
- if vca_type == "helm" or vca_type == "helm-v3":
+ if vca_type == "helm-v3":
# TODO: review for those cases where the helm chart is a reference and
# is not part of the NF package
prometheus_jobs = await self.extract_prometheus_scrape_jobs(
self.logger.debug(
logging_text + "Invoke and wait for placement optimization"
)
- await self.msg.aiowrite(
- "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
- )
+ await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
db_poll_interval = 5
wait = db_poll_interval * 10
pla_result = None
df = vnfd.get("df", [{}])[0]
# Checking for auto-scaling configuration
if "scaling-aspect" in df:
- rel_operation_types = {
- "GE": ">=",
- "LE": "<=",
- "GT": ">",
- "LT": "<",
- "EQ": "==",
- "NE": "!=",
- }
scaling_aspects = df["scaling-aspect"]
all_vnfd_monitoring_params = {}
for ivld in vnfd.get("int-virtual-link-desc", ()):
for vdur in monitored_vdurs:
vdu_id = vdur["vdu-id-ref"]
metric_name = vnf_monitoring_param.get("performance-metric")
+ metric_name = f"osm_{metric_name}"
vnf_member_index = vnfr["member-vnf-index-ref"]
scalein_threshold = scaling_criteria.get(
"scale-in-threshold"
operation = scaling_criteria[
"scale-in-relational-operation"
]
- rel_operator = rel_operation_types.get(operation, "<=")
+ rel_operator = self.rel_operation_types.get(
+ operation, "<="
+ )
metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
expression = f"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
labels = {
operation = scaling_criteria[
"scale-out-relational-operation"
]
- rel_operator = rel_operation_types.get(operation, "<=")
+ rel_operator = self.rel_operation_types.get(
+ operation, "<="
+ )
metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
expression = f"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
labels = {
alerts.append(alert)
return alerts
def _gather_vnfr_alarm_alerts(self, vnfr, vnfd):
    """Build Prometheus alert entries for the VDU alarms of a VNF.

    For every alarm declared under vdu[].alarm in *vnfd* that references a
    valid VDU monitoring parameter, one alert dictionary is produced
    containing the Prometheus rule ("prometheus_config") plus the metadata
    needed to track and act on the alarm ("tags", "action", ...). Alarms
    with missing or dangling monitoring-param references are logged and
    skipped.

    :param vnfr: VNF record; provides nsr id, member index and vdur list
    :param vnfd: VNF descriptor; provides the VDU alarms and monitoring params
    :return: list of alert dictionaries (possibly empty)
    """
    alerts = []
    nsr_id = vnfr["nsr-id-ref"]
    vnf_member_index = vnfr["member-vnf-index-ref"]

    # Checking for VNF alarm configuration
    for vdur in vnfr["vdur"]:
        vdu_id = vdur["vdu-id-ref"]
        # locate the descriptor VDU matching this vdur
        vdu = next(filter(lambda vdu: vdu["id"] == vdu_id, vnfd["vdu"]))
        if "alarm" in vdu:
            # Get VDU monitoring params, since alerts are based on them
            vdu_monitoring_params = {}
            for mp in vdu.get("monitoring-parameter", []):
                vdu_monitoring_params[mp.get("id")] = mp
            if not vdu_monitoring_params:
                self.logger.error(
                    "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
                )
                continue
            # Get alarms in the VDU
            alarm_descriptors = vdu["alarm"]
            # Create VDU alarms for each alarm in the VDU
            for alarm_descriptor in alarm_descriptors:
                # Check that the VDU alarm refers to a proper monitoring param
                alarm_monitoring_param = alarm_descriptor.get(
                    "vnf-monitoring-param-ref", ""
                )
                vdu_specific_monitoring_param = vdu_monitoring_params.get(
                    alarm_monitoring_param, {}
                )
                if not vdu_specific_monitoring_param:
                    self.logger.error(
                        "VDU alarm refers to a VDU monitoring param not present in the VDU"
                    )
                    continue
                metric_name = vdu_specific_monitoring_param.get(
                    "performance-metric"
                )
                if not metric_name:
                    self.logger.error(
                        "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
                    )
                    continue
                # Set params of the alarm to be created in Prometheus
                # metrics are exported with the "osm_" prefix
                metric_name = f"osm_{metric_name}"
                metric_threshold = alarm_descriptor.get("value")
                uuid = str(uuid4())
                alert_name = f"vdu_alarm_{uuid}"
                operation = alarm_descriptor["operation"]
                rel_operator = self.rel_operation_types.get(operation, "<=")
                metric_selector = f'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
                expression = f"{metric_selector} {rel_operator} {metric_threshold}"
                labels = {
                    "ns_id": nsr_id,
                    "vnf_member_index": vnf_member_index,
                    "vdu_id": vdu_id,
                    "vdu_name": "{{ $labels.vdu_name }}",
                }
                prom_cfg = {
                    "alert": alert_name,
                    "expr": expression,
                    "for": "1m",  # default value. Ideally, this should be related to an IM param, but there is not such param
                    "labels": labels,
                }
                # collect the configured webhook/actions per alarm state
                alarm_action = dict()
                for action_type in ["ok", "insufficient-data", "alarm"]:
                    if (
                        "actions" in alarm_descriptor
                        and action_type in alarm_descriptor["actions"]
                    ):
                        alarm_action[action_type] = alarm_descriptor["actions"][
                            action_type
                        ]
                alert = {
                    "uuid": uuid,
                    "name": alert_name,
                    "metric": metric_name,
                    "tags": {
                        "ns_id": nsr_id,
                        "vnf_member_index": vnf_member_index,
                        "vdu_id": vdu_id,
                    },
                    "alarm_status": "ok",
                    "action_type": "vdu_alarm",
                    "action": alarm_action,
                    "prometheus_config": prom_cfg,
                }
                alerts.append(alert)
    return alerts
+
def update_nsrs_with_pla_result(self, params):
try:
nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
# create namespace and certificate if any helm based EE is present in the NS
if check_helm_ee_in_ns(db_vnfds):
- # TODO: create EE namespace
+ await self.vca_map["helm-v3"].setup_ns_namespace(
+ name=nsr_id,
+ )
# create TLS certificates
await self.vca_map["helm-v3"].create_tls_certificate(
- secret_name="ee-tls-{}".format(nsr_id),
+ secret_name=self.EE_TLS_NAME,
dns_prefix="*",
nsr_id=nsr_id,
usage="server auth",
+ namespace=nsr_id,
)
nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
stage=stage,
)
+ # Check if each vnf has exporter for metric collection if so update prometheus job records
+ if "exporters-endpoints" in vnfd.get("df")[0]:
+ exporter_config = vnfd.get("df")[0].get("exporters-endpoints")
+ self.logger.debug("exporter config :{}".format(exporter_config))
+ artifact_path = "{}/{}/{}".format(
+ base_folder["folder"],
+ base_folder["pkg-dir"],
+ "exporter-endpoint",
+ )
+ ee_id = None
+ ee_config_descriptor = exporter_config
+ vnfr_id = db_vnfr["id"]
+ rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+ logging_text,
+ nsr_id,
+ vnfr_id,
+ vdu_id=None,
+ vdu_index=None,
+ user=None,
+ pub_key=None,
+ )
+ self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
+ self.logger.debug("Artifact_path:{}".format(artifact_path))
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+ vdu_id_for_prom = None
+ vdu_index_for_prom = None
+ for x in get_iterable(db_vnfr, "vdur"):
+ vdu_id_for_prom = x.get("vdu-id-ref")
+ vdu_index_for_prom = x.get("count-index")
+ prometheus_jobs = await self.extract_prometheus_scrape_jobs(
+ ee_id=ee_id,
+ artifact_path=artifact_path,
+ ee_config_descriptor=ee_config_descriptor,
+ vnfr_id=vnfr_id,
+ nsr_id=nsr_id,
+ target_ip=rw_mgmt_ip,
+ element_type="VDU",
+ vdu_id=vdu_id_for_prom,
+ vdu_index=vdu_index_for_prom,
+ )
+
+ self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
+ if prometheus_jobs:
+ db_nsr_update["_admin.deployed.prometheus_jobs"] = prometheus_jobs
+ self.update_db_2(
+ "nsrs",
+ nsr_id,
+ db_nsr_update,
+ )
+
+ for job in prometheus_jobs:
+ self.db.set_one(
+ "prometheus_jobs",
+ {"job_name": job["job_name"]},
+ job,
+ upsert=True,
+ fail_on_empty=False,
+ )
+
# Check if this NS has a charm configuration
descriptor_config = nsd.get("ns-configuration")
if descriptor_config and descriptor_config.get("juju"):
self.logger.info(f"Storing scaling alert in MongoDB: {alert}")
self.db.create("alerts", alert)
+ alarm_alerts = self._gather_vnfr_alarm_alerts(vnfr, vnfd)
+ for alert in alarm_alerts:
+ self.logger.info(f"Storing VNF alarm alert in MongoDB: {alert}")
+ self.db.create("alerts", alert)
if db_nsr:
self._write_ns_status(
nsr_id=nsr_id,
"nsr_id": nsr_id,
"nslcmop_id": nslcmop_id,
"operationState": nslcmop_operation_state,
+ "startTime": db_nslcmop["startTime"],
+ "links": db_nslcmop["links"],
+ "operationParams": {
+ "nsInstanceId": nsr_id,
+ "nsdId": db_nsr["nsd-id"],
+ },
},
- loop=self.loop,
)
except Exception as e:
self.logger.error(
vnfr_data.get("_id"),
{"kdur.{}.status".format(kdu_index): "ERROR"},
)
- except Exception:
+ except Exception as error:
# ignore to keep original exception
- pass
+ self.logger.warning(
+ f"An exception occurred while updating DB: {str(error)}"
+ )
# reraise original error
raise
k8scluster_id_2_uuic = {
"helm-chart-v3": {},
- "helm-chart": {},
"juju-bundle": {},
}
# Default version: helm3, if helm-version is v2 assign v2
k8sclustertype = "helm-chart-v3"
self.logger.debug("kdur: {}".format(kdur))
- if (
- kdur.get("helm-version")
- and kdur.get("helm-version") == "v2"
- ):
- k8sclustertype = "helm-chart"
elif kdur.get("juju-bundle"):
kdumodel = kdur["juju-bundle"]
k8sclustertype = "juju-bundle"
kdumodel = self.fs.path + filename
except (asyncio.TimeoutError, asyncio.CancelledError):
raise
- except Exception: # it is not a file
- pass
+ except Exception as e: # it is not a file
+ self.logger.warning(f"An exception occurred: {str(e)}")
k8s_cluster_id = kdur["k8s-cluster"]["id"]
step = "Synchronize repos for k8s cluster '{}'".format(
vca_type = "native_charm"
elif ee_item.get("helm-chart"):
vca_name = ee_item["helm-chart"]
- if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
- vca_type = "helm"
- else:
- vca_type = "helm-v3"
+ vca_type = "helm-v3"
else:
self.logger.debug(
logging_text + "skipping non juju neither charm configuration"
member_vnf_index or "", vdu_id or ""
)
- @staticmethod
- def _create_nslcmop(nsr_id, operation, params):
- """
- Creates a ns-lcm-opp content to be stored at database.
- :param nsr_id: internal id of the instance
- :param operation: instantiate, terminate, scale, action, ...
- :param params: user parameters for the operation
- :return: dictionary following SOL005 format
- """
- # Raise exception if invalid arguments
- if not (nsr_id and operation and params):
- raise LcmException(
- "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
- )
- now = time()
- _id = str(uuid4())
- nslcmop = {
- "id": _id,
- "_id": _id,
- # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
- "operationState": "PROCESSING",
- "statusEnteredTime": now,
- "nsInstanceId": nsr_id,
- "lcmOperationType": operation,
- "startTime": now,
- "isAutomaticInvocation": False,
- "operationParams": params,
- "isCancelPending": False,
- "links": {
- "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
- "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
- },
- }
- return nslcmop
-
def _format_additional_params(self, params):
params = params or {}
for key, value in params.items():
# Function to return execution_environment id
- def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
- # TODO vdu_index_count
- for vca in vca_deployed_list:
- if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
- return vca["ee_id"]
-
async def destroy_N2VC(
self,
logging_text,
) and vca.get("needed_terminate")
# For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
# pending native charms
- destroy_ee = (
- True if vca_type in ("helm", "helm-v3", "native_charm") else False
- )
+ destroy_ee = True if vca_type in ("helm-v3", "native_charm") else False
# self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
# vca_index, vca.get("ee_id"), vca_type, destroy_ee))
task = asyncio.ensure_future(
# Delete Namespace and Certificates if necessary
if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
await self.vca_map["helm-v3"].delete_tls_certificate(
- certificate_name=db_nslcmop["nsInstanceId"],
+ namespace=db_nslcmop["nsInstanceId"],
+ certificate_name=self.EE_TLS_NAME,
+ )
+ await self.vca_map["helm-v3"].delete_namespace(
+ namespace=db_nslcmop["nsInstanceId"],
)
- # TODO: Delete namespace
# Delete from k8scluster
stage[1] = "Deleting KDUs."
"operationState": nslcmop_operation_state,
"autoremove": autoremove,
},
- loop=self.loop,
)
except Exception as e:
self.logger.error(
)
)
# wait and retry
- await asyncio.sleep(retries_interval, loop=self.loop)
+ await asyncio.sleep(retries_interval)
else:
if isinstance(e, asyncio.TimeoutError):
e = N2VCException(
kdu_action = (
True
if primitive_name in actions
- and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
+ and kdu["k8scluster-type"] != "helm-chart-v3"
else False
)
"nslcmop_id": nslcmop_id,
"operationState": nslcmop_operation_state,
},
- loop=self.loop,
)
except Exception as e:
self.logger.error(
# add chart to list and all parameters
step = "Getting helm chart name"
chart_name = ee_item.get("helm-chart")
- if (
- ee_item.get("helm-version")
- and ee_item.get("helm-version") == "v2"
- ):
- vca_type = "helm"
- else:
- vca_type = "helm-v3"
+ vca_type = "helm-v3"
step = "Setting Helm chart artifact paths"
helm_artifacts.append(
and member_vnf_index
):
msg.update({"vnf_member_index": member_vnf_index})
- await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
+ await self.msg.aiowrite("ns", change_type, msg)
except Exception as e:
self.logger.error(
logging_text + "kafka_write notification Exception {}".format(e)
if kdur.get("helm-chart"):
k8s_cluster_type = "helm-chart-v3"
self.logger.debug("kdur: {}".format(kdur))
- if (
- kdur.get("helm-version")
- and kdur.get("helm-version") == "v2"
- ):
- k8s_cluster_type = "helm-chart"
elif kdur.get("juju-bundle"):
k8s_cluster_type = "juju-bundle"
else:
if kdur.get("helm-chart"):
k8s_cluster_type = "helm-chart-v3"
self.logger.debug("kdur: {}".format(kdur))
- if (
- kdur.get("helm-version")
- and kdur.get("helm-version") == "v2"
- ):
- k8s_cluster_type = "helm-chart"
elif kdur.get("juju-bundle"):
k8s_cluster_type = "juju-bundle"
else:
"nslcmop_id": nslcmop_id,
"operationState": nslcmop_operation_state,
}
- await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
+ await self.msg.aiowrite("ns", "scaled", msg)
except Exception as e:
self.logger.error(
logging_text + "kafka_write notification Exception {}".format(e)
)
if not job_file:
return
+ self.logger.debug("Artifact path{}".format(artifact_path))
+ self.logger.debug("job file{}".format(job_file))
with self.fs.file_open((artifact_path, job_file), "r") as f:
job_data = f.read()
kdur_name = kdur.get("name")
break
- await asyncio.sleep(10, loop=self.loop)
+ await asyncio.sleep(10)
else:
if vdu_id and vdu_index is not None:
raise LcmException(
f"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
)
- # TODO get_service
- _, _, service = ee_id.partition(".") # remove prefix "namespace."
- host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
- host_port = "80"
- vnfr_id = vnfr_id.replace("-", "")
- variables = {
- "JOB_NAME": vnfr_id,
- "TARGET_IP": target_ip,
- "EXPORTER_POD_IP": host_name,
- "EXPORTER_POD_PORT": host_port,
- "NSR_ID": nsr_id,
- "VNF_MEMBER_INDEX": vnf_member_index,
- "VDUR_NAME": vdur_name,
- "KDUR_NAME": kdur_name,
- "ELEMENT_TYPE": element_type,
- }
+ if ee_id is not None:
+ _, namespace, helm_id = get_ee_id_parts(
+ ee_id
+ ) # get namespace and EE gRPC service name
+ host_name = f'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
+ host_port = "80"
+ vnfr_id = vnfr_id.replace("-", "")
+ variables = {
+ "JOB_NAME": vnfr_id,
+ "TARGET_IP": target_ip,
+ "EXPORTER_POD_IP": host_name,
+ "EXPORTER_POD_PORT": host_port,
+ "NSR_ID": nsr_id,
+ "VNF_MEMBER_INDEX": vnf_member_index,
+ "VDUR_NAME": vdur_name,
+ "KDUR_NAME": kdur_name,
+ "ELEMENT_TYPE": element_type,
+ }
+ else:
+ metric_path = ee_config_descriptor["metric-path"]
+ target_port = ee_config_descriptor["metric-port"]
+ vnfr_id = vnfr_id.replace("-", "")
+ variables = {
+ "JOB_NAME": vnfr_id,
+ "TARGET_IP": target_ip,
+ "TARGET_PORT": target_port,
+ "METRIC_PATH": metric_path,
+ }
+
job_list = parse_job(job_data, variables)
# ensure job_name is using the vnfr_id. Adding the metadata nsr_id
for job in job_list:
not isinstance(job.get("job_name"), str)
or vnfr_id not in job["job_name"]
):
- job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
+ job["job_name"] = vnfr_id + "_" + str(SystemRandom().randint(1, 10000))
job["nsr_id"] = nsr_id
job["vnfr_id"] = vnfr_id
return job_list
)
return "FAILED", "Error in operate VNF {}".format(exc)
def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
    """
    Get VCA Cloud and VCA Cloud Credentials for the VIM account

    :param: vim_account_id: VIM Account ID

    :return: (cloud_name, cloud_credential)
    """
    vim_account = VimAccountDB.get_vim_account_with_id(vim_account_id)
    vim_config = vim_account.get("config", {})
    return vim_config.get("vca_cloud"), vim_config.get("vca_cloud_credential")
-
def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
    """
    Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account

    :param: vim_account_id: VIM Account ID

    :return: (cloud_name, cloud_credential)
    """
    vim_account = VimAccountDB.get_vim_account_with_id(vim_account_id)
    vim_config = vim_account.get("config", {})
    return vim_config.get("vca_k8s_cloud"), vim_config.get(
        "vca_k8s_cloud_credential"
    )
-
async def migrate(self, nsr_id, nslcmop_id):
"""
Migrate VNFs and VDUs instances in a NS
"nslcmop_id": nslcmop_id,
"operationState": nslcmop_operation_state,
}
- await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
+ await self.msg.aiowrite("ns", "migrated", msg)
except Exception as e:
self.logger.error(
logging_text + "kafka_write notification Exception {}".format(e)
for target_vdu in target_vdu_list:
deploy_params_vdu = target_vdu
# Set run-day1 vnf level value if not vdu level value exists
- if not deploy_params_vdu.get("run-day1") and target_vnf[
- "additionalParams"
- ].get("run-day1"):
+ if not deploy_params_vdu.get("run-day1") and target_vnf.get(
+ "additionalParams", {}
+ ).get("run-day1"):
deploy_params_vdu["run-day1"] = target_vnf[
"additionalParams"
].get("run-day1")
"nslcmop_id": nslcmop_id,
"operationState": nslcmop_operation_state,
}
- await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
+ await self.msg.aiowrite("ns", "healed", msg)
except Exception as e:
self.logger.error(
logging_text + "kafka_write notification Exception {}".format(e)
vca_type = "native_charm"
elif ee_item.get("helm-chart"):
vca_name = ee_item["helm-chart"]
- if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
- vca_type = "helm"
- else:
- vca_type = "helm-v3"
+ vca_type = "helm-v3"
else:
self.logger.debug(
logging_text + "skipping non juju neither charm configuration"
# if SSH access is required, then get execution environment SSH public
# if native charm we have waited already to VM be UP
- if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
+ if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
pub_key = None
user = None
# self.logger.debug("get ssh key block")
self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
if operational_status_ro != "healing":
break
- await asyncio.sleep(15, loop=self.loop)
+ await asyncio.sleep(15)
else: # timeout_ns_deploy
raise NgRoException("Timeout waiting ns to deploy")
"nslcmop_id": nslcmop_id,
"operationState": nslcmop_operation_state,
}
- await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
+ await self.msg.aiowrite("ns", "verticalscaled", msg)
except Exception as e:
self.logger.error(
logging_text + "kafka_write notification Exception {}".format(e)