import logging
import logging.handlers
import traceback
+import ipaddress
import json
from jinja2 import (
Environment,
from osm_lcm.ng_ro import NgRoClient, NgRoException
from osm_lcm.lcm_utils import (
LcmException,
- LcmExceptionNoMgmtIP,
LcmBase,
deep_get,
get_iterable,
from osm_lcm.data_utils.dict_utils import parse_yaml_strings
from osm_lcm.data_utils.database.vim_account import VimAccountDB
from n2vc.definitions import RelationEndpoint
-from n2vc.k8s_helm_conn import K8sHelmConnector
from n2vc.k8s_helm3_conn import K8sHelm3Connector
from n2vc.k8s_juju_conn import K8sJujuConnector
on_update_db=self._on_update_n2vc_db,
)
- self.k8sclusterhelm2 = K8sHelmConnector(
- kubectl_command=self.vca_config.kubectlpath,
- helm_command=self.vca_config.helmpath,
- log=self.logger,
- on_update_db=None,
- fs=self.fs,
- db=self.db,
- )
-
self.k8sclusterhelm3 = K8sHelm3Connector(
kubectl_command=self.vca_config.kubectlpath,
helm_command=self.vca_config.helm3path,
)
self.k8scluster_map = {
- "helm-chart": self.k8sclusterhelm2,
"helm-chart-v3": self.k8sclusterhelm3,
"chart": self.k8sclusterhelm3,
"juju-bundle": self.k8sclusterjuju,
if not isinstance(ip_mac, str):
return ip_mac
try:
+ next_ipv6 = None
+ next_ipv4 = None
+ dual_ip = ip_mac.split(";")
+ if len(dual_ip) == 2:
+ for ip in dual_ip:
+ if ipaddress.ip_address(ip).version == 6:
+ ipv6 = ipaddress.IPv6Address(ip)
+ next_ipv6 = str(ipaddress.IPv6Address(int(ipv6) + 1))
+ elif ipaddress.ip_address(ip).version == 4:
+ ipv4 = ipaddress.IPv4Address(ip)
+ next_ipv4 = str(ipaddress.IPv4Address(int(ipv4) + 1))
+ return [next_ipv4, next_ipv6]
# try with ipv4 look for last dot
i = ip_mac.rfind(".")
if i > 0:
pass
return None
- def _on_update_ro_db(self, nsrs_id, ro_descriptor):
- # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
-
- try:
- # TODO filter RO descriptor fields...
-
- # write to database
- db_dict = dict()
- # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
- db_dict["deploymentStatus"] = ro_descriptor
- self.update_db_2("nsrs", nsrs_id, db_dict)
-
- except Exception as e:
- self.logger.warn(
- "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
- )
-
async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
# remove last dot from path (if exists)
if path.endswith("."):
nsr = self.db.get_one(table="nsrs", q_filter=filter)
current_ns_status = nsr.get("nsState")
- # get vca status for NS
+ # First, we need to verify if the current vcaStatus is null, because if that is the case,
+ # MongoDB will not be able to create the fields used within the update key in the database
+ if not nsr.get("vcaStatus"):
+ # Write an empty dictionary to the vcaStatus field, if its value is null
+ self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})
+
+ # Get vca status for NS
status_dict = await self.n2vc.get_status(
namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
)
- # vcaStatus
+ # Update the vcaStatus
+ db_key = f"vcaStatus.{nsr_id}.VNF"
db_dict = dict()
- db_dict["vcaStatus"] = status_dict
+
+ db_dict[db_key] = status_dict[nsr_id]
+ await self.n2vc.update_vca_status(db_dict[db_key], vca_id=vca_id)
# update configurationStatus for this VCA
try:
vca_id=vca_id,
)
- # vcaStatus
+ # First, we need to verify if the current vcaStatus is null, because if that is the case,
+ # MongoDB will not be able to create the fields used within the update key in the database
+ nsr = self.db.get_one(table="nsrs", q_filter=filter)
+ if not nsr.get("vcaStatus"):
+ # Write an empty dictionary to the vcaStatus field, if its value is null
+ self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})
+
+ # Update the vcaStatus
+ db_key = f"vcaStatus.{nsr_id}.KNF"
db_dict = dict()
- db_dict["vcaStatus"] = {nsr_id: vca_status}
+
+ db_dict[db_key] = vca_status
+
+ if cluster_type in ("juju-bundle", "juju"):
+ # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
+ # status in a similar way between Juju Bundles and Helm Charts on this side
+ await self.k8sclusterjuju.update_vca_status(
+ db_dict[db_key],
+ kdu_instance,
+ vca_id=vca_id,
+ )
self.logger.debug(
f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
additional_params = vdur.get("additionalParams")
return parse_yaml_strings(additional_params)
def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
    """Build a copy of an OSM IM vnfd descriptor suitable for RO.

    :param vnfd: input vnfd
    :param new_id: overrides the vnf id if provided
    :param additionalParams: instantiation params for VNFs provided
    :param nsrId: id of the NSR
    :return: copy of vnfd
    """
    ro_vnfd = deepcopy(vnfd)
    # strip keys RO does not use: configuration, monitoring, scaling and internal ones
    for unused_key in (
        "_id",
        "_admin",
        "monitoring-param",
        "scaling-group-descriptor",
        "kdu",
        "k8s-cluster",
    ):
        ro_vnfd.pop(unused_key, None)
    if new_id:
        ro_vnfd["id"] = new_id

    # drop cloud-init content from the RO copy
    # NOTE(review): the original comment mentioned Jinja2 parsing, but this
    # block only removes the keys — rendering presumably happens elsewhere
    for vdu in get_iterable(ro_vnfd, "vdu"):
        vdu.pop("cloud-init-file", None)
        vdu.pop("cloud-init", None)
    return ro_vnfd
-
@staticmethod
def ip_profile_2_RO(ip_profile):
RO_ip_profile = deepcopy(ip_profile)
RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
return RO_ip_profile
- def _get_ro_vim_id_for_vim_account(self, vim_account):
- db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
- if db_vim["_admin"]["operationalState"] != "ENABLED":
- raise LcmException(
- "VIM={} is not available. operationalState={}".format(
- vim_account, db_vim["_admin"]["operationalState"]
- )
- )
- RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
- return RO_vim_id
-
def get_ro_wim_id_for_wim_account(self, wim_account):
    """Resolve the RO account id for a WIM account.

    Non-string input is returned unchanged (already resolved or not a
    reference); a string is looked up in the "wim_accounts" collection.

    :raises LcmException: if the WIM account is not in ENABLED state
    """
    if not isinstance(wim_account, str):
        return wim_account
    db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
    op_state = db_wim["_admin"]["operationalState"]
    if op_state != "ENABLED":
        raise LcmException(
            "WIM={} is not available. operationalState={}".format(
                wim_account, op_state
            )
        )
    return db_wim["_admin"]["deployed"]["RO-account"]
-
def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
db_vdu_push_list = []
template_vdur = []
except DbException as e:
self.logger.error("Cannot update vnf. {}".format(e))
def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
    """
    Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
    :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """
    for vnf_index, db_vnfr in db_vnfrs.items():
        # find the RO vnf entry matching this member-vnf-index;
        # the for/else at the bottom raises if no entry matches
        for vnf_RO in nsr_desc_RO["vnfs"]:
            if vnf_RO["member_vnf_index"] != vnf_index:
                continue
            # vnfr_update accumulates dotted-path keys (e.g. "vdur.0") that
            # are written in one shot with update_db_2 at the end
            vnfr_update = {}
            if vnf_RO.get("ip_address"):
                # RO may report "ip1;ip2;..." — keep only the first address
                db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                    "ip_address"
                ].split(";")[0]
            elif not db_vnfr.get("ip-address"):
                if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                    raise LcmExceptionNoMgmtIP(
                        "ns member_vnf_index '{}' has no IP address".format(
                            vnf_index
                        )
                    )

            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                vdur_RO_count_index = 0
                if vdur.get("pdu-type"):
                    # PDUs are not deployed by RO, nothing to update
                    continue
                # find the RO VM with the same vdu id; entries sharing that id
                # are counted with vdur_RO_count_index so that the n-th RO VM
                # is matched to the vdur with count-index == n
                for vdur_RO in get_iterable(vnf_RO, "vms"):
                    if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                        continue
                    if vdur["count-index"] != vdur_RO_count_index:
                        vdur_RO_count_index += 1
                        continue
                    vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                    if vdur_RO.get("ip_address"):
                        # keep only the first of possibly several ";"-separated IPs
                        vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                    else:
                        vdur["ip-address"] = None
                    vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                    vdur["name"] = vdur_RO.get("vim_name")
                    vdur["status"] = vdur_RO.get("status")
                    vdur["status-detailed"] = vdur_RO.get("error_msg")
                    # copy ip/mac per interface, matching by RO internal_name;
                    # for/else raises if an interface is missing in RO info
                    for ifacer in get_iterable(vdur, "interfaces"):
                        for interface_RO in get_iterable(vdur_RO, "interfaces"):
                            if ifacer["name"] == interface_RO.get("internal_name"):
                                ifacer["ip-address"] = interface_RO.get(
                                    "ip_address"
                                )
                                ifacer["mac-address"] = interface_RO.get(
                                    "mac_address"
                                )
                                break
                        else:
                            raise LcmException(
                                "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                "from VIM info".format(
                                    vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                )
                            )
                    vnfr_update["vdur.{}".format(vdu_index)] = vdur
                    break
                else:
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                        "VIM info".format(
                            vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                        )
                    )

            # update VNF internal VLDs from the RO nets, matching by osm id;
            # for/else raises if a vld is missing in RO info
            for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                for net_RO in get_iterable(nsr_desc_RO, "nets"):
                    if vld["id"] != net_RO.get("vnf_net_osm_id"):
                        continue
                    vld["vim-id"] = net_RO.get("vim_net_id")
                    vld["name"] = net_RO.get("vim_name")
                    vld["status"] = net_RO.get("status")
                    vld["status-detailed"] = net_RO.get("error_msg")
                    vnfr_update["vld.{}".format(vld_index)] = vld
                    break
                else:
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]
                        )
                    )

            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
            break

        else:
            raise LcmException(
                "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                    vnf_index
                )
            )
-
def _get_ns_config_info(self, nsr_id):
"""
Generates a mapping between vnf,vdu elements and the N2VC id
vca_id = self.get_vca_id(db_vnfr, db_nsr)
# create or register execution environment in VCA
- if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
+ if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
self._write_configuration_status(
nsr_id=nsr_id,
vca_index=vca_index,
db_dict=db_dict,
vca_id=vca_id,
)
- elif vca_type == "helm" or vca_type == "helm-v3":
+ elif vca_type == "helm-v3":
ee_id, credentials = await self.vca_map[
vca_type
].create_execution_environment(
# if SSH access is required, then get execution environment SSH public
# if native charm we have waited already to VM be UP
- if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
+ if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
pub_key = None
user = None
# self.logger.debug("get ssh key block")
# TODO register in database that primitive is done
# STEP 7 Configure metrics
- if vca_type == "helm" or vca_type == "helm-v3":
+ if vca_type == "helm-v3":
# TODO: review for those cases where the helm chart is a reference and
# is not part of the NF package
prometheus_jobs = await self.extract_prometheus_scrape_jobs(
stage[1] = stage[2] = ""
except asyncio.CancelledError:
error_list.append("Cancelled")
- # TODO cancel all tasks
+ await self._cancel_pending_tasks(logging_text, tasks_dict_info)
+ await self._wait_for_tasks(
+ logging_text,
+ tasks_dict_info,
+ timeout_ns_deploy,
+ stage,
+ nslcmop_id,
+ nsr_id=nsr_id,
+ )
except Exception as exc:
error_list.append(str(exc))
"nsr_id": nsr_id,
"nslcmop_id": nslcmop_id,
"operationState": nslcmop_operation_state,
+ "startTime": db_nslcmop["startTime"],
+ "links": db_nslcmop["links"],
+ "operationParams": {
+ "nsInstanceId": nsr_id,
+ "nsdId": db_nsr["nsd-id"],
+ },
},
)
except Exception as e:
vnfr_data.get("_id"),
{"kdur.{}.status".format(kdu_index): "ERROR"},
)
- except Exception:
+ except Exception as error:
# ignore to keep original exception
- pass
+ self.logger.warning(
+ f"An exception occurred while updating DB: {str(error)}"
+ )
# reraise original error
raise
k8scluster_id_2_uuic = {
"helm-chart-v3": {},
- "helm-chart": {},
"juju-bundle": {},
}
# Default version: helm3, if helm-version is v2 assign v2
k8sclustertype = "helm-chart-v3"
self.logger.debug("kdur: {}".format(kdur))
- if (
- kdur.get("helm-version")
- and kdur.get("helm-version") == "v2"
- ):
- k8sclustertype = "helm-chart"
elif kdur.get("juju-bundle"):
kdumodel = kdur["juju-bundle"]
k8sclustertype = "juju-bundle"
kdumodel = self.fs.path + filename
except (asyncio.TimeoutError, asyncio.CancelledError):
raise
- except Exception: # it is not a file
- pass
+ except Exception as e: # it is not a file
+ self.logger.warning(f"An exception occurred: {str(e)}")
k8s_cluster_id = kdur["k8s-cluster"]["id"]
step = "Synchronize repos for k8s cluster '{}'".format(
)
)
ee_descriptor_id = ee_item.get("id")
- if ee_item.get("juju"):
- vca_name = ee_item["juju"].get("charm")
- if get_charm_name:
- charm_name = self.find_charm_name(db_nsr, str(vca_name))
- vca_type = (
- "lxc_proxy_charm"
- if ee_item["juju"].get("charm") is not None
- else "native_charm"
- )
- if ee_item["juju"].get("cloud") == "k8s":
- vca_type = "k8s_proxy_charm"
- elif ee_item["juju"].get("proxy") is False:
- vca_type = "native_charm"
- elif ee_item.get("helm-chart"):
- vca_name = ee_item["helm-chart"]
- if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
- vca_type = "helm"
- else:
- vca_type = "helm-v3"
- else:
+ vca_name, charm_name, vca_type = self.get_vca_info(
+ ee_item, db_nsr, get_charm_name
+ )
+ if not vca_type:
self.logger.debug(
- logging_text + "skipping non juju neither charm configuration"
+ logging_text + "skipping, non juju/charm/helm configuration"
)
continue
member_vnf_index or "", vdu_id or ""
)
- @staticmethod
- def _create_nslcmop(nsr_id, operation, params):
- """
- Creates a ns-lcm-opp content to be stored at database.
- :param nsr_id: internal id of the instance
- :param operation: instantiate, terminate, scale, action, ...
- :param params: user parameters for the operation
- :return: dictionary following SOL005 format
- """
- # Raise exception if invalid arguments
- if not (nsr_id and operation and params):
- raise LcmException(
- "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
- )
- now = time()
- _id = str(uuid4())
- nslcmop = {
- "id": _id,
- "_id": _id,
- # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
- "operationState": "PROCESSING",
- "statusEnteredTime": now,
- "nsInstanceId": nsr_id,
- "lcmOperationType": operation,
- "startTime": now,
- "isAutomaticInvocation": False,
- "operationParams": params,
- "isCancelPending": False,
- "links": {
- "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
- "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
- },
- }
- return nslcmop
-
def _format_additional_params(self, params):
params = params or {}
for key, value in params.items():
# Function to return execution_environment id
- def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
- # TODO vdu_index_count
- for vca in vca_deployed_list:
- if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
- return vca.get("ee_id")
-
async def destroy_N2VC(
self,
logging_text,
) and vca.get("needed_terminate")
# For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
# pending native charms
- destroy_ee = (
- True if vca_type in ("helm", "helm-v3", "native_charm") else False
- )
+ destroy_ee = True if vca_type in ("helm-v3", "native_charm") else False
# self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
# vca_index, vca.get("ee_id"), vca_type, destroy_ee))
task = asyncio.ensure_future(
if nsr_deployed.get("VCA"):
stage[1] = "Deleting all execution environments."
self.logger.debug(logging_text + stage[1])
- vca_id = self.get_vca_id({}, db_nsr)
- task_delete_ee = asyncio.ensure_future(
- asyncio.wait_for(
- self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
- timeout=self.timeout.charm_delete,
+ helm_vca_list = get_deployed_vca(db_nsr, {"type": "helm-v3"})
+ if helm_vca_list:
+ # Delete Namespace and Certificates
+ await self.vca_map["helm-v3"].delete_tls_certificate(
+ namespace=db_nslcmop["nsInstanceId"],
+ certificate_name=self.EE_TLS_NAME,
)
- )
- # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
- tasks_dict_info[task_delete_ee] = "Terminating all VCA"
-
- # Delete Namespace and Certificates if necessary
- if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())):
- await self.vca_map["helm-v3"].delete_tls_certificate(
- namespace=db_nslcmop["nsInstanceId"],
- certificate_name=self.EE_TLS_NAME,
- )
- await self.vca_map["helm-v3"].delete_namespace(
- namespace=db_nslcmop["nsInstanceId"],
- )
+ await self.vca_map["helm-v3"].delete_namespace(
+ namespace=db_nslcmop["nsInstanceId"],
+ )
+ else:
+ vca_id = self.get_vca_id({}, db_nsr)
+ task_delete_ee = asyncio.ensure_future(
+ asyncio.wait_for(
+ self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
+ timeout=self.timeout.charm_delete,
+ )
+ )
+ tasks_dict_info[task_delete_ee] = "Terminating all VCA"
# Delete from k8scluster
stage[1] = "Deleting KDUs."
stage[1] = stage[2] = ""
except asyncio.CancelledError:
error_list.append("Cancelled")
- # TODO cancell all tasks
+ await self._cancel_pending_tasks(logging_text, tasks_dict_info)
+ await self._wait_for_tasks(
+ logging_text,
+ tasks_dict_info,
+ timeout_ns_terminate,
+ stage,
+ nslcmop_id,
+ )
except Exception as exc:
error_list.append(str(exc))
# update status at database
self._write_op_status(nslcmop_id, stage)
return error_detail_list
+ async def _cancel_pending_tasks(self, logging_text, created_tasks_info):
+ for task, name in created_tasks_info.items():
+ self.logger.debug(logging_text + "Cancelling task: " + name)
+ task.cancel()
+
@staticmethod
def _map_primitive_params(primitive_desc, params, instantiation_params):
"""
vca_id=vca_id,
cluster_type=cluster_type,
)
- else:
+ if db_nsr["_admin"]["deployed"]["VCA"]:
for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
table, filter = "nsrs", {"_id": nsr_id}
path = "_admin.deployed.VCA.{}.".format(vca_index)
kdu_action = (
True
if primitive_name in actions
- and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
+ and kdu["k8scluster-type"] != "helm-chart-v3"
else False
)
# add chart to list and all parameters
step = "Getting helm chart name"
chart_name = ee_item.get("helm-chart")
- if (
- ee_item.get("helm-version")
- and ee_item.get("helm-version") == "v2"
- ):
- vca_type = "helm"
- else:
- vca_type = "helm-v3"
+ vca_type = "helm-v3"
step = "Setting Helm chart artifact paths"
helm_artifacts.append(
)
)
+ step = "Checking whether the descriptor has SFC"
+ if db_nsr.get("nsd", {}).get("vnffgd"):
+ raise LcmException(
+ "Ns update is not allowed for NS with SFC"
+ )
+
# There is no change in the charm package, then redeploy the VNF
# based on new descriptor
step = "Redeploying VNF"
if result == "FAILED":
nslcmop_operation_state = result
error_description_nslcmop = detailed_status
+ old_operational_status = "failed"
db_nslcmop_update["detailed-status"] = detailed_status
+ db_nsr_update["detailed-status"] = detailed_status
+ scaling_aspect = get_scaling_aspect(latest_vnfd)
+ scaling_group_desc = db_nsr.get("_admin").get(
+ "scaling-group", None
+ )
+ if scaling_group_desc:
+ for aspect in scaling_aspect:
+ scaling_group_id = aspect.get("id")
+ for scale_index, scaling_group in enumerate(
+ scaling_group_desc
+ ):
+ if scaling_group.get("name") == scaling_group_id:
+ db_nsr_update[
+ "_admin.scaling-group.{}.nb-scale-op".format(
+ scale_index
+ )
+ ] = 0
self.logger.debug(
logging_text
+ " step {} Done with result {} {}".format(
nslcmop_operation_state, detailed_status
)
)
+ elif update_type == "VERTICAL_SCALE":
+ self.logger.debug(
+ "Prepare for VERTICAL_SCALE update operation {}".format(db_nslcmop)
+ )
+ # Get the input parameters given through update request
+ vnf_instance_id = db_nslcmop["operationParams"]["verticalScaleVnf"].get(
+ "vnfInstanceId"
+ )
+
+ vnfd_id = db_nslcmop["operationParams"]["verticalScaleVnf"].get(
+ "vnfdId"
+ )
+ step = "Getting vnfr from database"
+ db_vnfr = self.db.get_one(
+ "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
+ )
+ self.logger.debug(step)
+ step = "Getting vnfds from database"
+ self.logger.debug("Start" + step)
+ # Latest VNFD
+ latest_vnfd = self.db.get_one(
+ "vnfds", {"_id": vnfd_id}, fail_on_empty=False
+ )
+ latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
+ # Current VNFD
+ current_vnf_revision = db_vnfr.get("revision", 1)
+ current_vnfd = self.db.get_one(
+ "vnfds_revisions",
+ {"_id": vnfd_id + ":" + str(current_vnf_revision)},
+ fail_on_empty=False,
+ )
+ self.logger.debug("End" + step)
+ # verify flavor changes
+ step = "Checking for flavor change"
+ if find_software_version(current_vnfd) != find_software_version(
+ latest_vnfd
+ ):
+ self.logger.debug("Start" + step)
+ if current_vnfd.get("virtual-compute-desc") == latest_vnfd.get(
+ "virtual-compute-desc"
+ ) and current_vnfd.get("virtual-storage-desc") == latest_vnfd.get(
+ "virtual-storage-desc"
+ ):
+ raise LcmException(
+ "No change in flavor check vnfd {}".format(vnfd_id)
+ )
+ else:
+ raise LcmException(
+ "No change in software_version of vnfd {}".format(vnfd_id)
+ )
+
+ self.logger.debug("End" + step)
+
+ (result, detailed_status) = await self.vertical_scale(
+ nsr_id, nslcmop_id
+ )
+ self.logger.debug(
+ "vertical_scale result: {} detailed_status :{}".format(
+ result, detailed_status
+ )
+ )
+ if result == "FAILED":
+ nslcmop_operation_state = result
+ error_description_nslcmop = detailed_status
+ db_nslcmop_update["detailed-status"] = detailed_status
+ if not nslcmop_operation_state:
+ nslcmop_operation_state = "COMPLETED"
+ self.logger.debug(
+ logging_text
+ + " task Done with result {} {}".format(
+ nslcmop_operation_state, detailed_status
+ )
+ )
# If nslcmop_operation_state is None, so any operation is not failed.
# All operations are executed in overall.
old_operational_status = ""
old_config_status = ""
nsi_id = None
+ prom_job_name = ""
try:
# wait for any previous tasks in process
step = "Waiting for previous operations to terminate"
old_operational_status = db_nsr["operational-status"]
old_config_status = db_nsr["config-status"]
+ step = "Checking whether the descriptor has SFC"
+ if db_nsr.get("nsd", {}).get("vnffgd"):
+ raise LcmException("Scaling is not allowed for NS with SFC")
+
step = "Parsing scaling parameters"
db_nsr_update["operational-status"] = "scaling"
self.update_db_2("nsrs", nsr_id, db_nsr_update)
nsr_id,
{
"_admin.scaling-group": [
- {"name": scaling_group, "nb-scale-op": 0}
+ {
+ "name": scaling_group,
+ "vnf_index": vnf_index,
+ "nb-scale-op": 0,
+ }
]
},
)
for admin_scale_index, admin_scale_info in enumerate(
db_nsr["_admin"]["scaling-group"]
):
- if admin_scale_info["name"] == scaling_group:
+ if (
+ admin_scale_info["name"] == scaling_group
+ and admin_scale_info["vnf_index"] == vnf_index
+ ):
nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
break
else: # not found, set index one plus last element and add new entry with the name
db_nsr_update[
"_admin.scaling-group.{}.name".format(admin_scale_index)
] = scaling_group
+ db_nsr_update[
+ "_admin.scaling-group.{}.vnf_index".format(admin_scale_index)
+ ] = vnf_index
vca_scaling_info = []
scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
vdud = get_vdu(db_vnfd, vdu_delta["id"])
# vdu_index also provides the number of instance of the targeted vdu
vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
+ if vdu_index <= len(db_vnfr["vdur"]):
+ vdu_name_id = db_vnfr["vdur"][vdu_index - 1]["vdu-name"]
+ prom_job_name = (
+ db_vnfr["_id"] + vdu_name_id + str(vdu_index - 1)
+ )
+ prom_job_name = prom_job_name.replace("_", "")
+ prom_job_name = prom_job_name.replace("-", "")
+ else:
+ prom_job_name = None
cloud_init_text = self._get_vdu_cloud_init_content(
vdud, db_vnfd
)
if kdur.get("helm-chart"):
k8s_cluster_type = "helm-chart-v3"
self.logger.debug("kdur: {}".format(kdur))
- if (
- kdur.get("helm-version")
- and kdur.get("helm-version") == "v2"
- ):
- k8s_cluster_type = "helm-chart"
elif kdur.get("juju-bundle"):
k8s_cluster_type = "juju-bundle"
else:
if kdur.get("helm-chart"):
k8s_cluster_type = "helm-chart-v3"
self.logger.debug("kdur: {}".format(kdur))
- if (
- kdur.get("helm-version")
- and kdur.get("helm-version") == "v2"
- ):
- k8s_cluster_type = "helm-chart"
elif kdur.get("juju-bundle"):
k8s_cluster_type = "juju-bundle"
else:
db_nsr_update["config-status"] = old_config_status
scale_process = None
# POST-SCALE END
+ # Check if each vnf has exporter for metric collection if so update prometheus job records
+ if scaling_type == "SCALE_OUT":
+ if "exporters-endpoints" in db_vnfd.get("df")[0]:
+ vnfr_id = db_vnfr["id"]
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+ exporter_config = db_vnfd.get("df")[0].get("exporters-endpoints")
+ self.logger.debug("exporter config :{}".format(exporter_config))
+ artifact_path = "{}/{}/{}".format(
+ base_folder["folder"],
+ base_folder["pkg-dir"],
+ "exporter-endpoint",
+ )
+ ee_id = None
+ ee_config_descriptor = exporter_config
+ rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
+ logging_text,
+ nsr_id,
+ vnfr_id,
+ vdu_id=db_vnfr["vdur"][-1]["vdu-id-ref"],
+ vdu_index=db_vnfr["vdur"][-1]["count-index"],
+ user=None,
+ pub_key=None,
+ )
+ self.logger.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip))
+ self.logger.debug("Artifact_path:{}".format(artifact_path))
+ vdu_id_for_prom = None
+ vdu_index_for_prom = None
+ for x in get_iterable(db_vnfr, "vdur"):
+ vdu_id_for_prom = x.get("vdu-id-ref")
+ vdu_index_for_prom = x.get("count-index")
+ vnfr_id = vnfr_id + vdu_id + str(vdu_index)
+ vnfr_id = vnfr_id.replace("_", "")
+ prometheus_jobs = await self.extract_prometheus_scrape_jobs(
+ ee_id=ee_id,
+ artifact_path=artifact_path,
+ ee_config_descriptor=ee_config_descriptor,
+ vnfr_id=vnfr_id,
+ nsr_id=nsr_id,
+ target_ip=rw_mgmt_ip,
+ element_type="VDU",
+ vdu_id=vdu_id_for_prom,
+ vdu_index=vdu_index_for_prom,
+ )
+ self.logger.debug("Prometheus job:{}".format(prometheus_jobs))
+ if prometheus_jobs:
+ db_nsr_update[
+ "_admin.deployed.prometheus_jobs"
+ ] = prometheus_jobs
+ self.update_db_2(
+ "nsrs",
+ nsr_id,
+ db_nsr_update,
+ )
+
+ for job in prometheus_jobs:
+ self.db.set_one(
+ "prometheus_jobs",
+ {"job_name": ""},
+ job,
+ upsert=True,
+ fail_on_empty=False,
+ )
db_nsr_update[
"detailed-status"
] = "" # "scaled {} {}".format(scaling_group, scaling_type)
exc_info=True,
)
finally:
+ error_list = list()
+ if exc:
+ error_list.append(str(exc))
self._write_ns_status(
nsr_id=nsr_id,
ns_state=None,
current_operation="IDLE",
current_operation_id=None,
)
- if tasks_dict_info:
- stage[1] = "Waiting for instantiate pending tasks."
- self.logger.debug(logging_text + stage[1])
- exc = await self._wait_for_tasks(
+ try:
+ if tasks_dict_info:
+ stage[1] = "Waiting for instantiate pending tasks."
+ self.logger.debug(logging_text + stage[1])
+ exc = await self._wait_for_tasks(
+ logging_text,
+ tasks_dict_info,
+ self.timeout.ns_deploy,
+ stage,
+ nslcmop_id,
+ nsr_id=nsr_id,
+ )
+ except asyncio.CancelledError:
+ error_list.append("Cancelled")
+ await self._cancel_pending_tasks(logging_text, tasks_dict_info)
+ await self._wait_for_tasks(
logging_text,
tasks_dict_info,
self.timeout.ns_deploy,
nslcmop_id,
nsr_id=nsr_id,
)
- if exc:
+ if error_list:
+ error_detail = "; ".join(error_list)
db_nslcmop_update[
"detailed-status"
- ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
+ ] = error_description_nslcmop = "FAILED {}: {}".format(
+ step, error_detail
+ )
nslcmop_operation_state = "FAILED"
if db_nsr:
db_nsr_update["operational-status"] = old_operational_status
db_nsr_update[
"detailed-status"
] = "FAILED scaling nslcmop={} {}: {}".format(
- nslcmop_id, step, exc
+ nslcmop_id, step, error_detail
)
else:
error_description_nslcmop = None
nslcmop_operation_state = "COMPLETED"
db_nslcmop_update["detailed-status"] = "Done"
+ if scaling_type == "SCALE_IN" and prom_job_name is not None:
+ self.db.del_one(
+ "prometheus_jobs",
+ {"job_name": prom_job_name},
+ fail_on_empty=False,
+ )
self._write_op_status(
op_id=nslcmop_id,
)
return "FAILED", "Error in operate VNF {}".format(exc)
def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
    """
    Get VCA Cloud and VCA Cloud Credentials for the VIM account

    :param: vim_account_id: VIM Account ID

    :return: (cloud_name, cloud_credential)
    """
    vim_config = VimAccountDB.get_vim_account_with_id(vim_account_id).get(
        "config", {}
    )
    return vim_config.get("vca_cloud"), vim_config.get("vca_cloud_credential")
-
def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
    """
    Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account

    :param: vim_account_id: VIM Account ID

    :return: (cloud_name, cloud_credential)
    """
    vim_config = VimAccountDB.get_vim_account_with_id(vim_account_id).get(
        "config", {}
    )
    return vim_config.get("vca_k8s_cloud"), vim_config.get("vca_k8s_cloud_credential")
-
async def migrate(self, nsr_id, nslcmop_id):
"""
Migrate VNFs and VDUs instances in a NS
old_config_status = db_nsr["config-status"]
db_nsr_update = {
+ "operational-status": "healing",
"_admin.deployed.RO.operational-status": "healing",
}
self.update_db_2("nsrs", nsr_id, db_nsr_update)
task_instantiation_info=tasks_dict_info,
stage=stage,
)
-
except (
ROclient.ROClientException,
DbException,
exc_info=True,
)
finally:
- if tasks_dict_info:
- stage[1] = "Waiting for healing pending tasks."
- self.logger.debug(logging_text + stage[1])
- exc = await self._wait_for_tasks(
+ error_list = list()
+ if db_vnfrs_list and target_list:
+ for vnfrs in db_vnfrs_list:
+ for vnf_instance in target_list:
+ if vnfrs["_id"] == vnf_instance.get("vnfInstanceId"):
+ self.db.set_list(
+ "vnfrs",
+ {"_id": vnfrs["_id"]},
+ {"_admin.modified": time()},
+ )
+ if exc:
+ error_list.append(str(exc))
+ try:
+ if tasks_dict_info:
+ stage[1] = "Waiting for healing pending tasks."
+ self.logger.debug(logging_text + stage[1])
+ exc = await self._wait_for_tasks(
+ logging_text,
+ tasks_dict_info,
+ self.timeout.ns_deploy,
+ stage,
+ nslcmop_id,
+ nsr_id=nsr_id,
+ )
+ except asyncio.CancelledError:
+ error_list.append("Cancelled")
+ await self._cancel_pending_tasks(logging_text, tasks_dict_info)
+ await self._wait_for_tasks(
logging_text,
tasks_dict_info,
self.timeout.ns_deploy,
nslcmop_id,
nsr_id=nsr_id,
)
- if exc:
+ if error_list:
+ error_detail = "; ".join(error_list)
db_nslcmop_update[
"detailed-status"
- ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
+ ] = error_description_nslcmop = "FAILED {}: {}".format(
+ step, error_detail
+ )
nslcmop_operation_state = "FAILED"
if db_nsr:
db_nsr_update["operational-status"] = old_operational_status
db_nsr_update["config-status"] = old_config_status
db_nsr_update[
"detailed-status"
- ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id, step, exc)
+ ] = "FAILED healing nslcmop={} {}: {}".format(
+ nslcmop_id, step, error_detail
+ )
for task, task_name in tasks_dict_info.items():
if not task.done() or task.cancelled() or task.exception():
if task_name.startswith(self.task_name_deploy_vca):
)
)
ee_descriptor_id = ee_item.get("id")
- if ee_item.get("juju"):
- vca_name = ee_item["juju"].get("charm")
- if get_charm_name:
- charm_name = self.find_charm_name(db_nsr, str(vca_name))
- vca_type = (
- "lxc_proxy_charm"
- if ee_item["juju"].get("charm") is not None
- else "native_charm"
- )
- if ee_item["juju"].get("cloud") == "k8s":
- vca_type = "k8s_proxy_charm"
- elif ee_item["juju"].get("proxy") is False:
- vca_type = "native_charm"
- elif ee_item.get("helm-chart"):
- vca_name = ee_item["helm-chart"]
- if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
- vca_type = "helm"
- else:
- vca_type = "helm-v3"
- else:
+ vca_name, charm_name, vca_type = self.get_vca_info(
+ ee_item, db_nsr, get_charm_name
+ )
+ if not vca_type:
self.logger.debug(
- logging_text + "skipping non juju neither charm configuration"
+ logging_text + "skipping, non juju/charm/helm configuration"
)
continue
# if SSH access is required, then get execution environment SSH public
# if native charm we have waited already to VM be UP
- if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
+ if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
pub_key = None
user = None
# self.logger.debug("get ssh key block")
:param: nslcmop_id: nslcmop ID of migrate
"""
- # Try to lock HA task here
- task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
- if not task_is_locked_by_me:
- return
logging_text = "Task ns={} vertical scale ".format(nsr_id)
- self.logger.debug(logging_text + "Enter")
+ self.logger.info(logging_text + "Enter")
+ stage = ["Preparing the environment", ""]
# get all needed from database
db_nslcmop = None
- db_nslcmop_update = {}
- nslcmop_operation_state = None
db_nsr_update = {}
target = {}
exc = None
start_deploy = time()
try:
+ db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+ operationParams = db_nslcmop.get("operationParams")
+ vertical_scale_data = operationParams["verticalScaleVnf"]
+ vnfd_id = vertical_scale_data["vnfdId"]
+ count_index = vertical_scale_data["countIndex"]
+ vdu_id_ref = vertical_scale_data["vduId"]
+ vnfr_id = vertical_scale_data["vnfInstanceId"]
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ db_flavor = db_nsr.get("flavor")
+ db_flavor_index = str(len(db_flavor))
+
+ def set_flavor_refrence_to_vdur(diff=0):
+ """
+ Utility function to add and remove the
+ ref to new ns-flavor-id to vdurs
+ :param: diff: default 0
+ """
+ q_filter = {}
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+ for vdu_index, vdur in enumerate(db_vnfr.get("vdur", ())):
+ if (
+ vdur.get("count-index") == count_index
+ and vdur.get("vdu-id-ref") == vdu_id_ref
+ ):
+ filter_text = {
+ "_id": vnfr_id,
+ "vdur.count-index": count_index,
+ "vdur.vdu-id-ref": vdu_id_ref,
+ }
+ q_filter.update(filter_text)
+ db_update = {}
+ db_update["vdur.{}.ns-flavor-id".format(vdu_index)] = str(
+ int(db_flavor_index) - diff
+ )
+ self.db.set_one(
+ "vnfrs",
+ q_filter=q_filter,
+ update_dict=db_update,
+ fail_on_empty=True,
+ )
+
# wait for any previous tasks in process
- step = "Waiting for previous operations to terminate"
+ stage[1] = "Waiting for previous operations to terminate"
await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
self._write_ns_status(
nsr_id=nsr_id,
ns_state=None,
- current_operation="VerticalScale",
+ current_operation="VERTICALSCALE",
current_operation_id=nslcmop_id,
)
- step = "Getting nslcmop from database"
+ self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
self.logger.debug(
- step + " after having waited for previous tasks to be completed"
+ stage[1] + " after having waited for previous tasks to be completed"
)
- db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
- operationParams = db_nslcmop.get("operationParams")
+ self.update_db_2("nsrs", nsr_id, db_nsr_update)
+ vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+ virtual_compute = vnfd["virtual-compute-desc"][0]
+ virtual_memory = round(
+ float(virtual_compute["virtual-memory"]["size"]) * 1024
+ )
+ virtual_cpu = virtual_compute["virtual-cpu"]["num-virtual-cpu"]
+ virtual_storage = vnfd["virtual-storage-desc"][0]["size-of-storage"]
+ flavor_dict_update = {
+ "id": db_flavor_index,
+ "memory-mb": virtual_memory,
+ "name": f"{vdu_id_ref}-{count_index}-flv",
+ "storage-gb": str(virtual_storage),
+ "vcpu-count": virtual_cpu,
+ }
+ db_flavor.append(flavor_dict_update)
+ db_update = {}
+ db_update["flavor"] = db_flavor
+ q_filter = {
+ "_id": nsr_id,
+ }
+            # Update the NSRs and VNFRs with the requested flavour details so that RO tasks can function properly
+ self.db.set_one(
+ "nsrs",
+ q_filter=q_filter,
+ update_dict=db_update,
+ fail_on_empty=True,
+ )
+ set_flavor_refrence_to_vdur()
target = {}
- target.update(operationParams)
+ new_operationParams = {
+ "lcmOperationType": "verticalscale",
+ "verticalScale": "CHANGE_VNFFLAVOR",
+ "nsInstanceId": nsr_id,
+ "changeVnfFlavorData": {
+ "vnfInstanceId": vnfr_id,
+ "additionalParams": {
+ "vduid": vdu_id_ref,
+ "vduCountIndex": count_index,
+ "virtualMemory": virtual_memory,
+ "numVirtualCpu": int(virtual_cpu),
+ "sizeOfStorage": int(virtual_storage),
+ },
+ },
+ }
+ target.update(new_operationParams)
+
+ stage[1] = "Sending vertical scale request to RO... {}".format(target)
+ self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
+ self.logger.info("RO target > {}".format(target))
desc = await self.RO.vertical_scale(nsr_id, target)
- self.logger.debug("RO return > {}".format(desc))
+ self.logger.info("RO.vertical_scale return value - {}".format(desc))
action_id = desc["action_id"]
await self._wait_ng_ro(
nsr_id,
self.timeout.verticalscale,
operation="verticalscale",
)
- except (ROclient.ROClientException, DbException, LcmException) as e:
+ except (
+ NgRoException,
+ ROclient.ROClientException,
+ DbException,
+ LcmException,
+ ) as e:
self.logger.error("Exit Exception {}".format(e))
exc = e
except asyncio.CancelledError:
- self.logger.error("Cancelled Exception while '{}'".format(step))
+ self.logger.error("Cancelled Exception while '{}'".format(stage))
exc = "Operation was cancelled"
except Exception as e:
exc = traceback.format_exc()
"Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
)
finally:
- self._write_ns_status(
- nsr_id=nsr_id,
- ns_state=None,
- current_operation="IDLE",
- current_operation_id=None,
- )
if exc:
- db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
- nslcmop_operation_state = "FAILED"
+ self.logger.critical(
+ "Vertical-Scale operation Failed, cleaning up nsrs and vnfrs flavor detail"
+ )
+ self.db.set_one(
+ "nsrs",
+ {"_id": nsr_id},
+ None,
+ pull={"flavor": {"id": db_flavor_index}},
+ )
+ set_flavor_refrence_to_vdur(diff=1)
+ return "FAILED", "Error in verticalscale VNF {}".format(exc)
else:
- nslcmop_operation_state = "COMPLETED"
- db_nslcmop_update["detailed-status"] = "Done"
- db_nsr_update["detailed-status"] = "Done"
-
- self._write_op_status(
- op_id=nslcmop_id,
- stage="",
- error_message="",
- operation_state=nslcmop_operation_state,
- other_update=db_nslcmop_update,
- )
- if nslcmop_operation_state:
- try:
- msg = {
- "nsr_id": nsr_id,
- "nslcmop_id": nslcmop_id,
- "operationState": nslcmop_operation_state,
- }
- await self.msg.aiowrite("ns", "verticalscaled", msg)
- except Exception as e:
- self.logger.error(
- logging_text + "kafka_write notification Exception {}".format(e)
- )
- self.logger.debug(logging_text + "Exit")
- self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")
+ return "COMPLETED", "Done"