##
import asyncio
+import shutil
from typing import Any, Dict, List
import yaml
import logging
deep_get,
get_iterable,
populate_dict,
+ check_juju_bundle_existence,
+ get_charm_artifact_path,
)
from osm_lcm.data_utils.nsd import (
get_ns_configuration_relation_list,
get_vnf_profiles,
)
from osm_lcm.data_utils.vnfd import (
+ get_kdu,
+ get_kdu_services,
get_relation_list,
get_vdu_list,
get_vdu_profile,
get_number_of_instances,
get_juju_ee_ref,
get_kdu_resource_profile,
+ find_software_version,
)
from osm_lcm.data_utils.list_utils import find_in_list
from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index, get_kdur
from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
from osm_lcm.lcm_helm_conn import LCMHelmConn
+from osm_lcm.osm_config import OsmConfigBuilder
from osm_lcm.prometheus import parse_job
from copy import copy, deepcopy
timeout_ns_terminate = 1800 # default global timeout for un deployment a ns
timeout_charm_delete = 10 * 60
timeout_primitive = 30 * 60 # timeout for primitive execution
+ timeout_ns_update = 30 * 60 # timeout for ns update
timeout_progress_primitive = (
10 * 60
) # timeout for some progress in a primitive execution
self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
async def _on_update_k8s_db(
- self, cluster_uuid, kdu_instance, filter=None, vca_id=None
+ self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
):
"""
Updating vca status in NSR record
:param cluster_uuid: UUID of a k8s cluster
:param kdu_instance: The unique name of the KDU instance
:param filter: To get nsr_id
+ :param cluster_type: The cluster type (juju, k8s)
:return: none
"""
# self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
# .format(cluster_uuid, kdu_instance, filter))
+ nsr_id = filter.get("_id")
try:
- nsr_id = filter.get("_id")
-
- # get vca status for NS
- vca_status = await self.k8sclusterjuju.status_kdu(
- cluster_uuid,
- kdu_instance,
- complete_status=True,
+ vca_status = await self.k8scluster_map[cluster_type].status_kdu(
+ cluster_uuid=cluster_uuid,
+ kdu_instance=kdu_instance,
yaml_format=False,
+ complete_status=True,
vca_id=vca_id,
)
+
# vcaStatus
db_dict = dict()
db_dict["vcaStatus"] = {nsr_id: vca_status}
- await self.k8sclusterjuju.update_vca_status(
- db_dict["vcaStatus"],
- kdu_instance,
- vca_id=vca_id,
+ if cluster_type in ("juju-bundle", "juju"):
+ # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
+ # status in a similar way between Juju Bundles and Helm Charts on this side
+ await self.k8sclusterjuju.update_vca_status(
+ db_dict["vcaStatus"],
+ kdu_instance,
+ vca_id=vca_id,
+ )
+
+ self.logger.debug(
+ f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
)
# write to database
self.update_db_2("nsrs", nsr_id, db_dict)
-
except (asyncio.CancelledError, asyncio.TimeoutError):
raise
except Exception as e:
:param nsr_id:
:param vnfr_id:
:param kdu_name:
- :return: IP address
+ :return: IP address, K8s services
"""
# self.logger.debug(logging_text + "Starting wait_kdu_up")
)
if kdur.get("status"):
if kdur["status"] in ("READY", "ENABLED"):
- return kdur.get("ip-address")
+ return kdur.get("ip-address"), kdur.get("services")
else:
raise LcmException(
"target KDU={} is in error state".format(kdu_name)
# wait for RO (ip-address) Insert pub_key into VM
if vnfr_id:
if kdu_name:
- rw_mgmt_ip = await self.wait_kdu_up(
+ rw_mgmt_ip, services = await self.wait_kdu_up(
logging_text, nsr_id, vnfr_id, kdu_name
)
+ vnfd = self.db.get_one(
+ "vnfds_revisions",
+ {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
+ )
+ kdu = get_kdu(vnfd, kdu_name)
+ kdu_services = [
+ service["name"] for service in get_kdu_services(kdu)
+ ]
+ exposed_services = []
+ for service in services:
+ if any(s in service["name"] for s in kdu_services):
+ exposed_services.append(service)
+ await self.vca_map[vca_type].exec_primitive(
+ ee_id=ee_id,
+ primitive_name="config",
+ params_dict={
+ "osm-config": json.dumps(
+ OsmConfigBuilder(
+ k8s={"services": exposed_services}
+ ).build()
+ )
+ },
+ vca_id=vca_id,
+ )
else:
rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
logging_text,
user=user,
pub_key=pub_key,
)
+
else:
rw_mgmt_ip = None # This is for a NS configuration
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
vca_id = self.get_vca_id({}, db_nsr)
if db_nsr["_admin"]["deployed"]["K8s"]:
- for k8s_index, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
- cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
+ for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
+ cluster_uuid, kdu_instance, cluster_type = (
+ k8s["k8scluster-uuid"],
+ k8s["kdu-instance"],
+ k8s["k8scluster-type"],
+ )
await self._on_update_k8s_db(
- cluster_uuid, kdu_instance, filter={"_id": nsr_id}, vca_id=vca_id
+ cluster_uuid=cluster_uuid,
+ kdu_instance=kdu_instance,
+ filter={"_id": nsr_id},
+ vca_id=vca_id,
+ cluster_type=cluster_type,
)
else:
for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
actions.add(primitive["name"])
for primitive in kdu_configuration.get("config-primitive", []):
actions.add(primitive["name"])
- kdu_action = True if primitive_name in actions else False
+ kdu = find_in_list(
+ nsr_deployed["K8s"],
+ lambda kdu: kdu_name == kdu["kdu-name"]
+ and kdu["member-vnf-index"] == vnf_index,
+ )
+ kdu_action = (
+ True
+ if primitive_name in actions
+ and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
+ else False
+ )
# TODO check if ns is in a proper status
if kdu_name and (
self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
return nslcmop_operation_state, detailed_status
async def _ns_charm_upgrade(
    self,
    ee_id,
    charm_id,
    charm_type,
    path,
    timeout: float = None,
) -> (str, str):
    """Upgrade the charm of a VNF instance in its execution environment.

    Args:
        ee_id: Execution environment id
        charm_id: charm-id
        charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
        path: Local path to the charm
        timeout: (Float) Timeout for the ns update operation; falls back to
            self.timeout_ns_update when not given

    Returns:
        result: (str, str) COMPLETED/FAILED, details
    """
    try:
        # Default to the proxy-charm connector when no type is recorded
        charm_type = charm_type or "lxc_proxy_charm"
        output = await self.vca_map[charm_type].upgrade_charm(
            ee_id=ee_id,
            path=path,
            charm_id=charm_id,
            charm_type=charm_type,
            timeout=timeout or self.timeout_ns_update,
        )
    except (LcmException, asyncio.CancelledError):
        raise
    except Exception as e:
        self.logger.debug("Error upgrading charm {}".format(path))
        return "FAILED", "Error upgrading charm {}: {}".format(path, e)

    if output:
        return "COMPLETED", output

    # Fix: previously this path fell through and implicitly returned None,
    # which broke the caller's (result, detailed_status) tuple unpacking.
    return "FAILED", "Error upgrading charm {}: no output received".format(path)
+
async def update(self, nsr_id, nslcmop_id):
    """Update NS according to different update types

    This method performs upgrade of VNF instances then updates the revision
    number in VNF record

    Args:
        nsr_id: Network service will be updated
        nslcmop_id: ns lcm operation id

    Returns:
        It may raise DbException, LcmException, N2VCException, K8sException

    """
    # Try to lock HA task here
    task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
    if not task_is_locked_by_me:
        return

    logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
    self.logger.debug(logging_text + "Enter")

    # Set the required variables to be filled up later
    db_nsr = None
    db_nslcmop_update = {}
    vnfr_update = {}
    nslcmop_operation_state = None
    db_nsr_update = {}
    error_description_nslcmop = ""
    exc = None
    change_type = ""
    detailed_status = ""
    # Fix: initialized here so the finally block cannot raise NameError when
    # an exception occurs before the nsr record is read from the database.
    old_operational_status = ""

    try:
        # wait for any previous tasks in process
        step = "Waiting for previous operations to terminate"
        await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state=None,
            current_operation="UPDATING",
            current_operation_id=nslcmop_id,
        )

        step = "Getting nslcmop from database"
        db_nslcmop = self.db.get_one(
            "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
        )
        update_type = db_nslcmop["operationParams"]["updateType"]

        step = "Getting nsr from database"
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        # Remember the previous operational status so it can be restored
        # once the update finishes (or fails).
        old_operational_status = db_nsr["operational-status"]
        db_nsr_update["operational-status"] = "updating"
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        nsr_deployed = db_nsr["_admin"].get("deployed")

        if update_type == "CHANGE_VNFPKG":

            # Get the input parameters given through update request
            vnf_instance_id = db_nslcmop["operationParams"][
                "changeVnfPackageData"
            ].get("vnfInstanceId")

            vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
                "vnfdId"
            )
            timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")

            step = "Getting vnfr from database"
            db_vnfr = self.db.get_one(
                "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
            )

            step = "Getting vnfds from database"
            # Latest VNFD
            latest_vnfd = self.db.get_one(
                "vnfds", {"_id": vnfd_id}, fail_on_empty=False
            )
            latest_vnfd_revision = latest_vnfd["_admin"].get("revision")

            # Current VNFD
            current_vnf_revision = db_vnfr.get("revision", 1)
            current_vnfd = self.db.get_one(
                "vnfds_revisions",
                {"_id": vnfd_id + ":" + str(current_vnf_revision)},
                fail_on_empty=False,
            )
            # Charm artifact paths will be filled up later
            (
                current_charm_artifact_path,
                target_charm_artifact_path,
                charm_artifact_paths,
            ) = ([], [], [])

            step = "Checking if revision has changed in VNFD"
            if current_vnf_revision != latest_vnfd_revision:

                # There is new revision of VNFD, update operation is required
                current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
                latest_vnfd_path = vnfd_id

                step = "Removing the VNFD packages if they exist in the local path"
                shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
                shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)

                step = "Get the VNFD packages from FSMongo"
                self.fs.sync(from_path=latest_vnfd_path)
                self.fs.sync(from_path=current_vnfd_path)

                step = (
                    "Get the charm-type, charm-id, ee-id if there is deployed VCA"
                )
                base_folder = latest_vnfd["_admin"]["storage"]

                for charm_index, charm_deployed in enumerate(
                    get_iterable(nsr_deployed, "VCA")
                ):
                    vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Getting charm-id and charm-type
                    if charm_deployed.get("member-vnf-index") == vnf_index:
                        charm_id = self.get_vca_id(db_vnfr, db_nsr)
                        charm_type = charm_deployed.get("type")

                        # Getting ee-id
                        ee_id = charm_deployed.get("ee_id")

                        step = "Getting descriptor config"
                        descriptor_config = get_configuration(
                            current_vnfd, current_vnfd["id"]
                        )

                        if "execution-environment-list" in descriptor_config:
                            ee_list = descriptor_config.get(
                                "execution-environment-list", []
                            )
                        else:
                            ee_list = []

                        # There could be several charm used in the same VNF
                        for ee_item in ee_list:
                            if ee_item.get("juju"):

                                step = "Getting charm name"
                                charm_name = ee_item["juju"].get("charm")

                                step = "Setting Charm artifact paths"
                                current_charm_artifact_path.append(
                                    get_charm_artifact_path(
                                        base_folder,
                                        charm_name,
                                        charm_type,
                                        current_vnf_revision,
                                    )
                                )
                                target_charm_artifact_path.append(
                                    get_charm_artifact_path(
                                        base_folder,
                                        charm_name,
                                        charm_type,
                                    )
                                )

                        charm_artifact_paths = zip(
                            current_charm_artifact_path, target_charm_artifact_path
                        )

                step = "Checking if software version has changed in VNFD"
                if find_software_version(current_vnfd) != find_software_version(
                    latest_vnfd
                ):

                    step = "Checking if existing VNF has charm"
                    for current_charm_path, target_charm_path in list(
                        charm_artifact_paths
                    ):
                        if current_charm_path:
                            raise LcmException(
                                "Software version change is not supported as VNF instance {} has charm.".format(
                                    vnf_instance_id
                                )
                            )

                    # There is no change in the charm package, then redeploy the VNF
                    # based on new descriptor
                    step = "Redeploying VNF"
                    # This part is in https://osm.etsi.org/gerrit/11943

                else:
                    step = "Checking if any charm package has changed or not"
                    for current_charm_path, target_charm_path in list(
                        charm_artifact_paths
                    ):
                        if (
                            current_charm_path
                            and target_charm_path
                            and self.check_charm_hash_changed(
                                current_charm_path, target_charm_path
                            )
                        ):

                            step = "Checking whether VNF uses juju bundle"
                            if check_juju_bundle_existence(current_vnfd):

                                raise LcmException(
                                    "Charm upgrade is not supported for the instance which"
                                    " uses juju-bundle: {}".format(
                                        check_juju_bundle_existence(current_vnfd)
                                    )
                                )

                            step = "Upgrading Charm"
                            (
                                result,
                                detailed_status,
                            ) = await self._ns_charm_upgrade(
                                ee_id=ee_id,
                                charm_id=charm_id,
                                charm_type=charm_type,
                                path=self.fs.path + target_charm_path,
                                timeout=timeout_seconds,
                            )

                            if result == "FAILED":
                                nslcmop_operation_state = result
                                error_description_nslcmop = detailed_status

                            db_nslcmop_update["detailed-status"] = detailed_status
                            self.logger.debug(
                                logging_text
                                + " step {} Done with result {} {}".format(
                                    step, nslcmop_operation_state, detailed_status
                                )
                            )

                    step = "Updating policies"
                    # This part is in https://osm.etsi.org/gerrit/11943

                # If nslcmop_operation_state is None, so any operation is not failed.
                if not nslcmop_operation_state:
                    nslcmop_operation_state = "COMPLETED"

                    # If update CHANGE_VNFPKG nslcmop_operation is successful
                    # vnf revision need to be updated
                    vnfr_update["revision"] = latest_vnfd_revision
                    self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)

            self.logger.debug(
                logging_text
                + " task Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
        elif update_type == "REMOVE_VNF":
            # This part is included in https://osm.etsi.org/gerrit/11876
            pass

        # If nslcmop_operation_state is None, so any operation is not failed.
        # All operations are executed in overall.
        if not nslcmop_operation_state:
            nslcmop_operation_state = "COMPLETED"
        db_nsr_update["operational-status"] = old_operational_status

    except (DbException, LcmException, N2VCException, K8sException) as e:
        self.logger.error(logging_text + "Exit Exception {}".format(e))
        exc = e
    except asyncio.CancelledError:
        self.logger.error(
            logging_text + "Cancelled Exception while '{}'".format(step)
        )
        exc = "Operation was cancelled"
    except asyncio.TimeoutError:
        self.logger.error(logging_text + "Timeout while '{}'".format(step))
        exc = "Timeout"
    except Exception as e:
        exc = traceback.format_exc()
        self.logger.critical(
            logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
            exc_info=True,
        )
    finally:
        if exc:
            db_nslcmop_update[
                "detailed-status"
            ] = (
                detailed_status
            ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
            nslcmop_operation_state = "FAILED"
            # Restore whatever status the NS had before this update started
            db_nsr_update["operational-status"] = old_operational_status
        if db_nsr:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=db_nsr["nsState"],
                current_operation="IDLE",
                current_operation_id=None,
                other_update=db_nsr_update,
            )

        self._write_op_status(
            op_id=nslcmop_id,
            stage="",
            error_message=error_description_nslcmop,
            operation_state=nslcmop_operation_state,
            other_update=db_nslcmop_update,
        )

    # Notify result on kafka bus so subscribers (e.g. NBI) see the outcome
    if nslcmop_operation_state:
        try:
            await self.msg.aiowrite(
                "ns",
                "updated",
                {
                    "nsr_id": nsr_id,
                    "nslcmop_id": nslcmop_id,
                    "operationState": nslcmop_operation_state,
                },
                loop=self.loop,
            )
        except Exception as e:
            self.logger.error(
                logging_text + "kafka_write notification Exception {}".format(e)
            )
    self.logger.debug(logging_text + "Exit")
    self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
    return nslcmop_operation_state, detailed_status
+
async def scale(self, nsr_id, nslcmop_id):
# Try to lock HA task here
task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)