X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;ds=sidebyside;f=osm_lcm%2Fns.py;h=1714985affa0ab9180d62677078b15fdc3ce6183;hb=HEAD;hp=76f0bf5f8f4d93be4ed7b08ac3a95d63e2657811;hpb=2c8ab4dcb82ecbc207f2e2bf5dd37707bc4be73e;p=osm%2FLCM.git

diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py
index 76f0bf5..ea57400 100644
--- a/osm_lcm/ns.py
+++ b/osm_lcm/ns.py
@@ -23,6 +23,7 @@ import yaml
 import logging
 import logging.handlers
 import traceback
+import ipaddress
 import json
 from jinja2 import (
     Environment,
@@ -221,6 +222,18 @@ class NsLcm(LcmBase):
         if not isinstance(ip_mac, str):
             return ip_mac
         try:
+            next_ipv6 = None
+            next_ipv4 = None
+            dual_ip = ip_mac.split(";")
+            if len(dual_ip) == 2:
+                for ip in dual_ip:
+                    if ipaddress.ip_address(ip).version == 6:
+                        ipv6 = ipaddress.IPv6Address(ip)
+                        next_ipv6 = str(ipaddress.IPv6Address(int(ipv6) + 1))
+                    elif ipaddress.ip_address(ip).version == 4:
+                        ipv4 = ipaddress.IPv4Address(ip)
+                        next_ipv4 = str(ipaddress.IPv4Address(int(ipv4) + 1))
+                return [next_ipv4, next_ipv6]
             # try with ipv4 look for last dot
             i = ip_mac.rfind(".")
             if i > 0:
@@ -252,14 +265,23 @@ class NsLcm(LcmBase):
         nsr = self.db.get_one(table="nsrs", q_filter=filter)
         current_ns_status = nsr.get("nsState")
 
-        # get vca status for NS
+        # First, we need to verify if the current vcaStatus is null, because if that is the case,
+        # MongoDB will not be able to create the fields used within the update key in the database
+        if not nsr.get("vcaStatus"):
+            # Write an empty dictionary to the vcaStatus field, if its value is null
+            self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})
+
+        # Get vca status for NS
         status_dict = await self.n2vc.get_status(
             namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
         )
 
-        # vcaStatus
+        # Update the vcaStatus
+        db_key = f"vcaStatus.{nsr_id}.VNF"
         db_dict = dict()
-        db_dict["vcaStatus"] = status_dict
+
+        db_dict[db_key] = status_dict[nsr_id]
+        await self.n2vc.update_vca_status(db_dict[db_key], vca_id=vca_id)
 
         # update configurationStatus for this VCA
         try:
@@ -363,9 +385,27 @@ class NsLcm(LcmBase):
                 vca_id=vca_id,
             )
 
-        # vcaStatus
+        # First, we need to verify if the current vcaStatus is null, because if that is the case,
+        # MongoDB will not be able to create the fields used within the update key in the database
+        nsr = self.db.get_one(table="nsrs", q_filter=filter)
+        if not nsr.get("vcaStatus"):
+            # Write an empty dictionary to the vcaStatus field, if its value is null
+            self.update_db_2("nsrs", nsr_id, {"vcaStatus": dict()})
+
+        # Update the vcaStatus
+        db_key = f"vcaStatus.{nsr_id}.KNF"
         db_dict = dict()
-        db_dict["vcaStatus"] = {nsr_id: vca_status}
+
+        db_dict[db_key] = vca_status
+
+        if cluster_type in ("juju-bundle", "juju"):
+            # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
+            # status in a similar way between Juju Bundles and Helm Charts on this side
+            await self.k8sclusterjuju.update_vca_status(
+                db_dict[db_key],
+                kdu_instance,
+                vca_id=vca_id,
+            )
 
         self.logger.debug(
             f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
@@ -3951,25 +3991,12 @@ class NsLcm(LcmBase):
                     )
                 )
                 ee_descriptor_id = ee_item.get("id")
-                if ee_item.get("juju"):
-                    vca_name = ee_item["juju"].get("charm")
-                    if get_charm_name:
-                        charm_name = self.find_charm_name(db_nsr, str(vca_name))
-                    vca_type = (
-                        "lxc_proxy_charm"
-                        if ee_item["juju"].get("charm") is not None
-                        else "native_charm"
-                    )
-                    if ee_item["juju"].get("cloud") == "k8s":
-                        vca_type = "k8s_proxy_charm"
-                    elif ee_item["juju"].get("proxy") is False:
-                        vca_type = "native_charm"
"native_charm" - elif ee_item.get("helm-chart"): - vca_name = ee_item["helm-chart"] - vca_type = "helm-v3" - else: + vca_name, charm_name, vca_type = self.get_vca_info( + ee_item, db_nsr, get_charm_name + ) + if not vca_type: self.logger.debug( - logging_text + "skipping non juju neither charm configuration" + logging_text + "skipping, non juju/charm/helm configuration" ) continue @@ -4522,25 +4549,25 @@ class NsLcm(LcmBase): if nsr_deployed.get("VCA"): stage[1] = "Deleting all execution environments." self.logger.debug(logging_text + stage[1]) - vca_id = self.get_vca_id({}, db_nsr) - task_delete_ee = asyncio.ensure_future( - asyncio.wait_for( - self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id), - timeout=self.timeout.charm_delete, + helm_vca_list = get_deployed_vca(db_nsr, {"type": "helm-v3"}) + if helm_vca_list: + # Delete Namespace and Certificates + await self.vca_map["helm-v3"].delete_tls_certificate( + namespace=db_nslcmop["nsInstanceId"], + certificate_name=self.EE_TLS_NAME, ) - ) - # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id)) - tasks_dict_info[task_delete_ee] = "Terminating all VCA" - - # Delete Namespace and Certificates if necessary - if check_helm_ee_in_ns(list(db_vnfds_from_member_index.values())): - await self.vca_map["helm-v3"].delete_tls_certificate( - namespace=db_nslcmop["nsInstanceId"], - certificate_name=self.EE_TLS_NAME, - ) - await self.vca_map["helm-v3"].delete_namespace( - namespace=db_nslcmop["nsInstanceId"], - ) + await self.vca_map["helm-v3"].delete_namespace( + namespace=db_nslcmop["nsInstanceId"], + ) + else: + vca_id = self.get_vca_id({}, db_nsr) + task_delete_ee = asyncio.ensure_future( + asyncio.wait_for( + self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id), + timeout=self.timeout.charm_delete, + ) + ) + tasks_dict_info[task_delete_ee] = "Terminating all VCA" # Delete from k8scluster stage[1] = "Deleting KDUs." 
@@ -5006,7 +5033,7 @@ class NsLcm(LcmBase):
                         vca_id=vca_id,
                         cluster_type=cluster_type,
                     )
-        else:
+        if db_nsr["_admin"]["deployed"]["VCA"]:
             for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
                 table, filter = "nsrs", {"_id": nsr_id}
                 path = "_admin.deployed.VCA.{}.".format(vca_index)
@@ -5893,6 +5920,12 @@ class NsLcm(LcmBase):
                     )
                 )
 
+            step = "Checking whether the descriptor has SFC"
+            if db_nsr.get("nsd", {}).get("vnffgd"):
+                raise LcmException(
+                    "Ns update is not allowed for NS with SFC"
+                )
+
             # There is no change in the charm package, then redeploy the VNF
             # based on new descriptor
             step = "Redeploying VNF"
@@ -6158,6 +6191,79 @@ class NsLcm(LcmBase):
                         nslcmop_operation_state, detailed_status
                     )
                 )
+        elif update_type == "VERTICAL_SCALE":
+            self.logger.debug(
+                "Prepare for VERTICAL_SCALE update operation {}".format(db_nslcmop)
+            )
+            # Get the input parameters given through update request
+            vnf_instance_id = db_nslcmop["operationParams"]["verticalScaleVnf"].get(
+                "vnfInstanceId"
+            )
+
+            vnfd_id = db_nslcmop["operationParams"]["verticalScaleVnf"].get(
+                "vnfdId"
+            )
+            step = "Getting vnfr from database"
+            db_vnfr = self.db.get_one(
+                "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
+            )
+            self.logger.debug(step)
+            step = "Getting vnfds from database"
+            self.logger.debug("Start" + step)
+            # Latest VNFD
+            latest_vnfd = self.db.get_one(
+                "vnfds", {"_id": vnfd_id}, fail_on_empty=False
+            )
+            latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
+            # Current VNFD
+            current_vnf_revision = db_vnfr.get("revision", 1)
+            current_vnfd = self.db.get_one(
+                "vnfds_revisions",
+                {"_id": vnfd_id + ":" + str(current_vnf_revision)},
+                fail_on_empty=False,
+            )
+            self.logger.debug("End" + step)
+            # verify flavor changes
+            step = "Checking for flavor change"
+            if find_software_version(current_vnfd) != find_software_version(
+                latest_vnfd
+            ):
+                self.logger.debug("Start" + step)
+                if current_vnfd.get("virtual-compute-desc") == latest_vnfd.get(
+                    "virtual-compute-desc"
+                ) and current_vnfd.get("virtual-storage-desc") == latest_vnfd.get(
+                    "virtual-storage-desc"
+                ):
+                    raise LcmException(
+                        "No change in flavor check vnfd {}".format(vnfd_id)
+                    )
+            else:
+                raise LcmException(
+                    "No change in software_version of vnfd {}".format(vnfd_id)
+                )
+
+            self.logger.debug("End" + step)
+
+            (result, detailed_status) = await self.vertical_scale(
+                nsr_id, nslcmop_id
+            )
+            self.logger.debug(
+                "vertical_scale result: {} detailed_status :{}".format(
+                    result, detailed_status
+                )
+            )
+            if result == "FAILED":
+                nslcmop_operation_state = result
+                error_description_nslcmop = detailed_status
+            db_nslcmop_update["detailed-status"] = detailed_status
+            if not nslcmop_operation_state:
+                nslcmop_operation_state = "COMPLETED"
+            self.logger.debug(
+                logging_text
+                + " task Done with result {} {}".format(
+                    nslcmop_operation_state, detailed_status
+                )
+            )
 
         # If nslcmop_operation_state is None, so any operation is not failed.
         # All operations are executed in overall.
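The flavor-change gate in the VERTICAL_SCALE hunk above is easy to misread because both branches raise. A condensed restatement, as a sketch only: find_software_version and LcmException come from osm_lcm.lcm_utils in the real code and are replaced by stand-ins here. The operation proceeds only when the software version changed and at least one of the compute/storage descriptors differs.

class LcmException(Exception):  # stand-in for osm_lcm.lcm_utils.LcmException
    pass

def software_version(vnfd: dict) -> str:  # stand-in for find_software_version()
    return vnfd.get("software-version", "1.0")

def assert_vertical_scale_allowed(current_vnfd: dict, latest_vnfd: dict) -> None:
    """Raise unless the VNFD revision both bumps the software version and changes the flavor."""
    if software_version(current_vnfd) == software_version(latest_vnfd):
        raise LcmException("No change in software_version of vnfd")
    if current_vnfd.get("virtual-compute-desc") == latest_vnfd.get(
        "virtual-compute-desc"
    ) and current_vnfd.get("virtual-storage-desc") == latest_vnfd.get(
        "virtual-storage-desc"
    ):
        raise LcmException("No change in flavor check vnfd")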
@@ -6273,6 +6379,10 @@ class NsLcm(LcmBase):
         old_operational_status = db_nsr["operational-status"]
         old_config_status = db_nsr["config-status"]
 
+        step = "Checking whether the descriptor has SFC"
+        if db_nsr.get("nsd", {}).get("vnffgd"):
+            raise LcmException("Scaling is not allowed for NS with SFC")
+
         step = "Parsing scaling parameters"
         db_nsr_update["operational-status"] = "scaling"
         self.update_db_2("nsrs", nsr_id, db_nsr_update)
@@ -6323,7 +6433,11 @@ class NsLcm(LcmBase):
                     nsr_id,
                     {
                         "_admin.scaling-group": [
-                            {"name": scaling_group, "nb-scale-op": 0}
+                            {
+                                "name": scaling_group,
+                                "vnf_index": vnf_index,
+                                "nb-scale-op": 0,
+                            }
                         ]
                     },
                 )
@@ -6332,7 +6446,10 @@ class NsLcm(LcmBase):
             for admin_scale_index, admin_scale_info in enumerate(
                 db_nsr["_admin"]["scaling-group"]
             ):
-                if admin_scale_info["name"] == scaling_group:
+                if (
+                    admin_scale_info["name"] == scaling_group
+                    and admin_scale_info["vnf_index"] == vnf_index
+                ):
                     nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
                     break
             else:  # not found, set index one plus last element and add new entry with the name
@@ -6340,6 +6457,9 @@ class NsLcm(LcmBase):
                 db_nsr_update[
                     "_admin.scaling-group.{}.name".format(admin_scale_index)
                 ] = scaling_group
+                db_nsr_update[
+                    "_admin.scaling-group.{}.vnf_index".format(admin_scale_index)
+                ] = vnf_index
 
         vca_scaling_info = []
         scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
@@ -7892,6 +8012,7 @@ class NsLcm(LcmBase):
         old_config_status = db_nsr["config-status"]
 
         db_nsr_update = {
+            "operational-status": "healing",
             "_admin.deployed.RO.operational-status": "healing",
         }
         self.update_db_2("nsrs", nsr_id, db_nsr_update)
@@ -8037,7 +8158,6 @@ class NsLcm(LcmBase):
                     task_instantiation_info=tasks_dict_info,
                     stage=stage,
                 )
-
         except (
             ROclient.ROClientException,
             DbException,
@@ -8059,6 +8179,15 @@ class NsLcm(LcmBase):
             )
         finally:
             error_list = list()
+            if db_vnfrs_list and target_list:
+                for vnfrs in db_vnfrs_list:
+                    for vnf_instance in target_list:
+                        if vnfrs["_id"] == vnf_instance.get("vnfInstanceId"):
+                            self.db.set_list(
+                                "vnfrs",
+                                {"_id": vnfrs["_id"]},
+                                {"_admin.modified": time()},
+                            )
             if exc:
                 error_list.append(str(exc))
             try:
@@ -8284,25 +8413,12 @@ class NsLcm(LcmBase):
                     )
                 )
                 ee_descriptor_id = ee_item.get("id")
-                if ee_item.get("juju"):
-                    vca_name = ee_item["juju"].get("charm")
-                    if get_charm_name:
-                        charm_name = self.find_charm_name(db_nsr, str(vca_name))
-                    vca_type = (
-                        "lxc_proxy_charm"
-                        if ee_item["juju"].get("charm") is not None
-                        else "native_charm"
-                    )
-                    if ee_item["juju"].get("cloud") == "k8s":
-                        vca_type = "k8s_proxy_charm"
-                    elif ee_item["juju"].get("proxy") is False:
-                        vca_type = "native_charm"
-                elif ee_item.get("helm-chart"):
-                    vca_name = ee_item["helm-chart"]
-                    vca_type = "helm-v3"
-                else:
+                vca_name, charm_name, vca_type = self.get_vca_info(
+                    ee_item, db_nsr, get_charm_name
+                )
+                if not vca_type:
                     self.logger.debug(
-                        logging_text + "skipping non juju neither charm configuration"
+                        logging_text + "skipping, non juju/charm/helm configuration"
                     )
                     continue
 
@@ -8829,20 +8945,11 @@ class NsLcm(LcmBase):
         :param: nslcmop_id: nslcmop ID of migrate
         """
 
-        # Try to lock HA task here
-        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
-        if not task_is_locked_by_me:
-            return
         logging_text = "Task ns={} vertical scale ".format(nsr_id)
-        self.logger.debug(logging_text + "Enter")
+        self.logger.info(logging_text + "Enter")
+        stage = ["Preparing the environment", ""]
         # get all needed from database
         db_nslcmop = None
-        db_nslcmop_update = {}
-        nslcmop_operation_state = None
-        old_db_update = {}
-        q_filter = {}
-        old_vdu_index = None
-        old_flavor_id = None
         db_nsr_update = {}
         target = {}
         exc = None
@@ -8850,79 +8957,113 @@ class NsLcm(LcmBase):
         start_deploy = time()
 
         try:
+            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
+            operationParams = db_nslcmop.get("operationParams")
+            vertical_scale_data = operationParams["verticalScaleVnf"]
+            vnfd_id = vertical_scale_data["vnfdId"]
+            count_index = vertical_scale_data["countIndex"]
+            vdu_id_ref = vertical_scale_data["vduId"]
+            vnfr_id = vertical_scale_data["vnfInstanceId"]
+            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+            db_flavor = db_nsr.get("flavor")
+            db_flavor_index = str(len(db_flavor))
+
+            def set_flavor_refrence_to_vdur(diff=0):
+                """
+                Utility function to add or remove the reference
+                to the new ns-flavor-id in the vdurs
+                :param: diff: default 0
+                """
+                q_filter = {}
+                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
+                for vdu_index, vdur in enumerate(db_vnfr.get("vdur", ())):
+                    if (
+                        vdur.get("count-index") == count_index
+                        and vdur.get("vdu-id-ref") == vdu_id_ref
+                    ):
+                        filter_text = {
+                            "_id": vnfr_id,
+                            "vdur.count-index": count_index,
+                            "vdur.vdu-id-ref": vdu_id_ref,
+                        }
+                        q_filter.update(filter_text)
+                        db_update = {}
+                        db_update["vdur.{}.ns-flavor-id".format(vdu_index)] = str(
+                            int(db_flavor_index) - diff
+                        )
+                        self.db.set_one(
+                            "vnfrs",
+                            q_filter=q_filter,
+                            update_dict=db_update,
+                            fail_on_empty=True,
+                        )
+
             # wait for any previous tasks in process
-            step = "Waiting for previous operations to terminate"
+            stage[1] = "Waiting for previous operations to terminate"
             await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
             self._write_ns_status(
                 nsr_id=nsr_id,
                 ns_state=None,
-                current_operation="VerticalScale",
+                current_operation="VERTICALSCALE",
                 current_operation_id=nslcmop_id,
             )
-            step = "Getting nslcmop from database"
+            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
             self.logger.debug(
-                step + " after having waited for previous tasks to be completed"
+                stage[1] + " after having waited for previous tasks to be completed"
             )
-            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
-            operationParams = db_nslcmop.get("operationParams")
-            # Update the VNFRS and NSRS with the requested flavour detail, So that ro tasks can function properly
-            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
-            db_flavor = db_nsr.get("flavor")
-            db_flavor_index = str(len(db_flavor))
-            change_vnf_flavor_data = operationParams["changeVnfFlavorData"]
-            flavor_dict = change_vnf_flavor_data["additionalParams"]
-            count_index = flavor_dict["vduCountIndex"]
-            vdu_id_ref = flavor_dict["vduid"]
+            self.update_db_2("nsrs", nsr_id, db_nsr_update)
+            vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+            virtual_compute = vnfd["virtual-compute-desc"][0]
+            virtual_memory = round(
+                float(virtual_compute["virtual-memory"]["size"]) * 1024
+            )
+            virtual_cpu = virtual_compute["virtual-cpu"]["num-virtual-cpu"]
+            virtual_storage = vnfd["virtual-storage-desc"][0]["size-of-storage"]
             flavor_dict_update = {
                 "id": db_flavor_index,
-                "memory-mb": flavor_dict["virtualMemory"],
+                "memory-mb": virtual_memory,
                 "name": f"{vdu_id_ref}-{count_index}-flv",
-                "storage-gb": flavor_dict["sizeOfStorage"],
-                "vcpu-count": flavor_dict["numVirtualCpu"],
+                "storage-gb": str(virtual_storage),
+                "vcpu-count": virtual_cpu,
             }
             db_flavor.append(flavor_dict_update)
             db_update = {}
             db_update["flavor"] = db_flavor
-            ns_q_filter = {
+            q_filter = {
                 "_id": nsr_id,
             }
+            # Update the VNFRS and NSRS with the requested flavour detail, so that RO tasks can function properly
             self.db.set_one(
                 "nsrs",
-                q_filter=ns_q_filter,
+                q_filter=q_filter,
                 update_dict=db_update,
                 fail_on_empty=True,
             )
-            db_vnfr = self.db.get_one(
-                "vnfrs", {"_id": change_vnf_flavor_data["vnfInstanceId"]}
-            )
-            for vdu_index, vdur in enumerate(db_vnfr.get("vdur", ())):
-                if (
-                    vdur.get("count-index") == count_index
-                    and vdur.get("vdu-id-ref") == vdu_id_ref
-                ):
-                    old_flavor_id = vdur.get("ns-flavor-id", 0)
-                    old_vdu_index = vdu_index
-                    filter_text = {
-                        "_id": change_vnf_flavor_data["vnfInstanceId"],
-                        "vdur.count-index": count_index,
-                        "vdur.vdu-id-ref": vdu_id_ref,
-                    }
-                    q_filter.update(filter_text)
-                    db_update = {}
-                    db_update[
-                        "vdur.{}.ns-flavor-id".format(vdu_index)
-                    ] = db_flavor_index
-                    self.db.set_one(
-                        "vnfrs",
-                        q_filter=q_filter,
-                        update_dict=db_update,
-                        fail_on_empty=True,
-                    )
+            set_flavor_refrence_to_vdur()
             target = {}
-            target.update(operationParams)
+            new_operationParams = {
+                "lcmOperationType": "verticalscale",
+                "verticalScale": "CHANGE_VNFFLAVOR",
+                "nsInstanceId": nsr_id,
+                "changeVnfFlavorData": {
+                    "vnfInstanceId": vnfr_id,
+                    "additionalParams": {
+                        "vduid": vdu_id_ref,
+                        "vduCountIndex": count_index,
+                        "virtualMemory": virtual_memory,
+                        "numVirtualCpu": int(virtual_cpu),
+                        "sizeOfStorage": int(virtual_storage),
+                    },
+                },
+            }
+            target.update(new_operationParams)
+
+            stage[1] = "Sending vertical scale request to RO... {}".format(target)
+            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
+            self.logger.info("RO target > {}".format(target))
             desc = await self.RO.vertical_scale(nsr_id, target)
-            self.logger.debug("RO return > {}".format(desc))
+            self.logger.info("RO.vertical_scale return value - {}".format(desc))
             action_id = desc["action_id"]
             await self._wait_ng_ro(
                 nsr_id,
@@ -8941,7 +9082,7 @@ class NsLcm(LcmBase):
             self.logger.error("Exit Exception {}".format(e))
             exc = e
         except asyncio.CancelledError:
-            self.logger.error("Cancelled Exception while '{}'".format(step))
+            self.logger.error("Cancelled Exception while '{}'".format(stage))
             exc = "Operation was cancelled"
         except Exception as e:
             exc = traceback.format_exc()
@@ -8949,51 +9090,17 @@ class NsLcm(LcmBase):
                 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
            )
         finally:
-            self._write_ns_status(
-                nsr_id=nsr_id,
-                ns_state=None,
-                current_operation="IDLE",
-                current_operation_id=None,
-            )
             if exc:
-                db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
-                nslcmop_operation_state = "FAILED"
-                old_db_update[
-                    "vdur.{}.ns-flavor-id".format(old_vdu_index)
-                ] = old_flavor_id
-            else:
-                nslcmop_operation_state = "COMPLETED"
-                db_nslcmop_update["detailed-status"] = "Done"
-                db_nsr_update["detailed-status"] = "Done"
-
-            self._write_op_status(
-                op_id=nslcmop_id,
-                stage="",
-                error_message="",
-                operation_state=nslcmop_operation_state,
-                other_update=db_nslcmop_update,
-            )
-            if old_vdu_index and old_db_update != {}:
                 self.logger.critical(
-                    "Reverting Old Flavor -- : {}".format(old_db_update)
+                    "Vertical-Scale operation Failed, cleaning up nsrs and vnfrs flavor detail"
                 )
                 self.db.set_one(
-                    "vnfrs",
-                    q_filter=q_filter,
-                    update_dict=old_db_update,
-                    fail_on_empty=True,
+                    "nsrs",
+                    {"_id": nsr_id},
+                    None,
+                    pull={"flavor": {"id": db_flavor_index}},
                 )
-            if nslcmop_operation_state:
-                try:
-                    msg = {
-                        "nsr_id": nsr_id,
-                        "nslcmop_id": nslcmop_id,
-                        "operationState": nslcmop_operation_state,
-                    }
-                    await self.msg.aiowrite("ns", "verticalscaled", msg)
-                except Exception as e:
-                    self.logger.error(
-                        logging_text + "kafka_write notification Exception {}".format(e)
-                    )
-            self.logger.debug(logging_text + "Exit")
-            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")
+                set_flavor_refrence_to_vdur(diff=1)
+                return "FAILED", "Error in verticalscale VNF {}".format(exc)
+            else:
+                return "COMPLETED", "Done"
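
For reference, the dual-stack branch added to increment_ip_mac at the top of this diff can be exercised in isolation. A minimal, self-contained sketch assuming the semicolon-separated "ipv4;ipv6" convention the hunk introduces — the function name next_dual_ip and the sample addresses are illustrative, not part of osm_lcm:

import ipaddress

def next_dual_ip(ip_mac: str):
    """Return [next_ipv4, next_ipv6] for an 'ipv4;ipv6' pair, else None."""
    next_ipv4 = next_ipv6 = None
    dual_ip = ip_mac.split(";")
    if len(dual_ip) == 2:
        for ip in dual_ip:
            addr = ipaddress.ip_address(ip)
            if addr.version == 6:
                next_ipv6 = str(ipaddress.IPv6Address(int(addr) + 1))
            elif addr.version == 4:
                next_ipv4 = str(ipaddress.IPv4Address(int(addr) + 1))
        return [next_ipv4, next_ipv6]
    return None

print(next_dual_ip("10.0.0.1;2001:db8::1"))  # ['10.0.0.2', '2001:db8::2']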