# limitations under the License.
# import logging
+import json
from uuid import uuid4
from http import HTTPStatus
from time import time
from copy import copy, deepcopy
-from osm_nbi.validation import validate_input, ValidationError, ns_instantiate, ns_terminate, ns_action, ns_scale,\
- nsi_instantiate
-from osm_nbi.base_topic import BaseTopic, EngineException, get_iterable, deep_get, increment_ip_mac
+from osm_nbi.validation import (
+ validate_input,
+ ValidationError,
+ ns_instantiate,
+ ns_terminate,
+ ns_action,
+ ns_scale,
+ ns_update,
+ ns_heal,
+ nsi_instantiate,
+ ns_migrate,
+ ns_verticalscale,
+)
+from osm_nbi.base_topic import (
+ BaseTopic,
+ EngineException,
+ get_iterable,
+ deep_get,
+ increment_ip_mac,
+)
from yaml import safe_dump
from osm_common.dbbase import DbException
from osm_common.msgbase import MsgException
from osm_common.fsbase import FsException
from osm_nbi import utils
-from re import match # For checking that additional parameter names are valid Jinja2 identifiers
+from re import (
+ match,
+) # For checking that additional parameter names are valid Jinja2 identifiers
__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
return
nsd_id = descriptor["nsdId"]
if not self.get_item_list(session, "nsds", {"id": nsd_id}):
- raise EngineException("Descriptor error at nsdId='{}' references a non exist nsd".format(nsd_id),
- http_code=HTTPStatus.CONFLICT)
+ raise EngineException(
+ "Descriptor error at nsdId='{}' references a non exist nsd".format(
+ nsd_id
+ ),
+ http_code=HTTPStatus.CONFLICT,
+ )
@staticmethod
def format_on_new(content, project_id=None, make_public=False):
return
nsr = db_content
if nsr["_admin"].get("nsState") == "INSTANTIATED":
- raise EngineException("nsr '{}' cannot be deleted because it is in 'INSTANTIATED' state. "
- "Launch 'terminate' operation first; or force deletion".format(_id),
- http_code=HTTPStatus.CONFLICT)
+ raise EngineException(
+ "nsr '{}' cannot be deleted because it is in 'INSTANTIATED' state. "
+ "Launch 'terminate' operation first; or force deletion".format(_id),
+ http_code=HTTPStatus.CONFLICT,
+ )
def delete_extra(self, session, _id, db_content, not_send_msg=None):
"""
self.db.del_list("vnfrs", {"nsr-id-ref": _id})
# set all used pdus as free
- self.db.set_list("pdus", {"_admin.usage.nsr_id": _id},
- {"_admin.usageState": "NOT_IN_USE", "_admin.usage": None})
+ self.db.set_list(
+ "pdus",
+ {"_admin.usage.nsr_id": _id},
+ {"_admin.usageState": "NOT_IN_USE", "_admin.usage": None},
+ )
# Set NSD usageState
nsr = db_content
used_nsd_id = nsr.get("nsd-id")
if used_nsd_id:
# check if used by another NSR
- nsrs_list = self.db.get_one("nsrs", {"nsd-id": used_nsd_id},
- fail_on_empty=False, fail_on_more=False)
+ nsrs_list = self.db.get_one(
+ "nsrs", {"nsd-id": used_nsd_id}, fail_on_empty=False, fail_on_more=False
+ )
if not nsrs_list:
- self.db.set_one("nsds", {"_id": used_nsd_id}, {"_admin.usageState": "NOT_IN_USE"})
+ self.db.set_one(
+ "nsds", {"_id": used_nsd_id}, {"_admin.usageState": "NOT_IN_USE"}
+ )
# Set VNFD usageState
used_vnfd_id_list = nsr.get("vnfd-id")
if used_vnfd_id_list:
for used_vnfd_id in used_vnfd_id_list:
# check if used by another NSR
- nsrs_list = self.db.get_one("nsrs", {"vnfd-id": used_vnfd_id},
- fail_on_empty=False, fail_on_more=False)
+ nsrs_list = self.db.get_one(
+ "nsrs",
+ {"vnfd-id": used_vnfd_id},
+ fail_on_empty=False,
+ fail_on_more=False,
+ )
if not nsrs_list:
- self.db.set_one("vnfds", {"_id": used_vnfd_id}, {"_admin.usageState": "NOT_IN_USE"})
+ self.db.set_one(
+ "vnfds",
+ {"_id": used_vnfd_id},
+ {"_admin.usageState": "NOT_IN_USE"},
+ )
# delete extra ro_nsrs used for internal RO module
self.db.del_one("ro_nsrs", q_filter={"_id": _id}, fail_on_empty=False)
return formated_request
@staticmethod
- def _format_additional_params(ns_request, member_vnf_index=None, vdu_id=None, kdu_name=None, descriptor=None):
+ def _format_additional_params(
+ ns_request, member_vnf_index=None, vdu_id=None, kdu_name=None, descriptor=None
+ ):
"""
Get and format user additional params for NS or VNF
:param ns_request: User instantiation additional parameters
additional_params = copy(ns_request.get("additionalParamsForNs"))
where_ = "additionalParamsForNs"
elif ns_request.get("additionalParamsForVnf"):
- where_ = "additionalParamsForVnf[member-vnf-index={}]".format(member_vnf_index)
- item = next((x for x in ns_request["additionalParamsForVnf"] if x["member-vnf-index"] == member_vnf_index),
- None)
+ where_ = "additionalParamsForVnf[member-vnf-index={}]".format(
+ member_vnf_index
+ )
+ item = next(
+ (
+ x
+ for x in ns_request["additionalParamsForVnf"]
+ if x["member-vnf-index"] == member_vnf_index
+ ),
+ None,
+ )
if item:
if not vdu_id and not kdu_name:
other_params = item
additional_params = copy(item.get("additionalParams")) or {}
if vdu_id and item.get("additionalParamsForVdu"):
- item_vdu = next((x for x in item["additionalParamsForVdu"] if x["vdu_id"] == vdu_id), None)
+ item_vdu = next(
+ (
+ x
+ for x in item["additionalParamsForVdu"]
+ if x["vdu_id"] == vdu_id
+ ),
+ None,
+ )
other_params = item_vdu
if item_vdu and item_vdu.get("additionalParams"):
where_ += ".additionalParamsForVdu[vdu_id={}]".format(vdu_id)
if kdu_name:
additional_params = {}
if item.get("additionalParamsForKdu"):
- item_kdu = next((x for x in item["additionalParamsForKdu"] if x["kdu_name"] == kdu_name), None)
+ item_kdu = next(
+ (
+ x
+ for x in item["additionalParamsForKdu"]
+ if x["kdu_name"] == kdu_name
+ ),
+ None,
+ )
other_params = item_kdu
if item_kdu and item_kdu.get("additionalParams"):
- where_ += ".additionalParamsForKdu[kdu_name={}]".format(kdu_name)
+ where_ += ".additionalParamsForKdu[kdu_name={}]".format(
+ kdu_name
+ )
additional_params = item_kdu["additionalParams"]
if additional_params:
for k, v in additional_params.items():
# BEGIN Check that additional parameter names are valid Jinja2 identifiers if target is not Kdu
- if not kdu_name and not match('^[a-zA-Z_][a-zA-Z0-9_]*$', k):
- raise EngineException("Invalid param name at {}:{}. Must contain only alphanumeric characters "
- "and underscores, and cannot start with a digit"
- .format(where_, k))
+ if not kdu_name and not match("^[a-zA-Z_][a-zA-Z0-9_]*$", k):
+ raise EngineException(
+ "Invalid param name at {}:{}. Must contain only alphanumeric characters "
+ "and underscores, and cannot start with a digit".format(
+ where_, k
+ )
+ )
# END Check that additional parameter names are valid Jinja2 identifiers
if not isinstance(k, str):
- raise EngineException("Invalid param at {}:{}. Only string keys are allowed".format(where_, k))
- if "." in k or "$" in k:
- raise EngineException("Invalid param at {}:{}. Keys must not contain dots or $".format(where_, k))
+ raise EngineException(
+ "Invalid param at {}:{}. Only string keys are allowed".format(
+ where_, k
+ )
+ )
+ if "$" in k:
+ raise EngineException(
+ "Invalid param at {}:{}. Keys must not contain $ symbol".format(
+ where_, k
+ )
+ )
if isinstance(v, (dict, tuple, list)):
additional_params[k] = "!!yaml " + safe_dump(v)
+ if kdu_name:
+ additional_params = json.dumps(additional_params)
if descriptor:
for df in descriptor.get("df", []):
# TODO: check for cloud-init
if member_vnf_index:
initial_primitives = []
- if "lcm-operations-configuration" in df \
- and "operate-vnf-op-config" in df["lcm-operations-configuration"]:
- for config in df["lcm-operations-configuration"]["operate-vnf-op-config"].get("day1-2", []):
- for primitive in get_iterable(config.get("initial-config-primitive")):
+ if (
+ "lcm-operations-configuration" in df
+ and "operate-vnf-op-config"
+ in df["lcm-operations-configuration"]
+ ):
+ for config in df["lcm-operations-configuration"][
+ "operate-vnf-op-config"
+ ].get("day1-2", []):
+ for primitive in get_iterable(
+ config.get("initial-config-primitive")
+ ):
initial_primitives.append(primitive)
else:
- initial_primitives = deep_get(descriptor, ("ns-configuration", "initial-config-primitive"))
+ initial_primitives = deep_get(
+ descriptor, ("ns-configuration", "initial-config-primitive")
+ )
for initial_primitive in get_iterable(initial_primitives):
for param in get_iterable(initial_primitive.get("parameter")):
- if param["value"].startswith("<") and param["value"].endswith(">"):
- if param["value"] in ("<rw_mgmt_ip>", "<VDU_SCALE_INFO>", "<ns_config_info>"):
+ if param["value"].startswith("<") and param["value"].endswith(
+ ">"
+ ):
+ if param["value"] in (
+ "<rw_mgmt_ip>",
+ "<VDU_SCALE_INFO>",
+ "<ns_config_info>",
+ ):
continue
- if not additional_params or param["value"][1:-1] not in additional_params:
- raise EngineException("Parameter '{}' needed for vnfd[id={}]:day1-2 configuration:"
- "initial-config-primitive[name={}] not supplied".
- format(param["value"], descriptor["id"],
- initial_primitive["name"]))
+ if (
+ not additional_params
+ or param["value"][1:-1] not in additional_params
+ ):
+ raise EngineException(
+ "Parameter '{}' needed for vnfd[id={}]:day1-2 configuration:"
+ "initial-config-primitive[name={}] not supplied".format(
+ param["value"],
+ descriptor["id"],
+ initial_primitive["name"],
+ )
+ )
return additional_params or None, other_params or None
step = "filling nsr from input data"
nsr_id = str(uuid4())
- nsr_descriptor = self._create_nsr_descriptor_from_nsd(nsd, ns_request, nsr_id)
+ nsr_descriptor = self._create_nsr_descriptor_from_nsd(
+ nsd, ns_request, nsr_id, session
+ )
# Create VNFRs
needed_vnfds = {}
# TODO: Change for multiple df support
- vnf_profiles = nsd.get("df", [[]])[0].get("vnf-profile", ())
+ vnf_profiles = nsd.get("df", [{}])[0].get("vnf-profile", ())
for vnfp in vnf_profiles:
vnfd_id = vnfp.get("vnfd-id")
vnf_index = vnfp.get("id")
- step = "getting vnfd id='{}' constituent-vnfd='{}' from database".format(vnfd_id, vnf_index)
+ step = (
+ "getting vnfd id='{}' constituent-vnfd='{}' from database".format(
+ vnfd_id, vnf_index
+ )
+ )
if vnfd_id not in needed_vnfds:
vnfd = self._get_vnfd_from_db(vnfd_id, session)
+ if "revision" in vnfd["_admin"]:
+ vnfd["revision"] = vnfd["_admin"]["revision"]
+ vnfd.pop("_admin")
needed_vnfds[vnfd_id] = vnfd
nsr_descriptor["vnfd-id"].append(vnfd["_id"])
else:
vnfd = needed_vnfds[vnfd_id]
- step = "filling vnfr vnfd-id='{}' constituent-vnfd='{}'".format(vnfd_id, vnf_index)
- vnfr_descriptor = self._create_vnfr_descriptor_from_vnfd(nsd, vnfd, vnfd_id, vnf_index, nsr_descriptor,
- ns_request, ns_k8s_namespace)
+ step = "filling vnfr vnfd-id='{}' constituent-vnfd='{}'".format(
+ vnfd_id, vnf_index
+ )
+ vnfr_descriptor = self._create_vnfr_descriptor_from_vnfd(
+ nsd,
+ vnfd,
+ vnfd_id,
+ vnf_index,
+ nsr_descriptor,
+ ns_request,
+ ns_k8s_namespace,
+ )
- step = "creating vnfr vnfd-id='{}' constituent-vnfd='{}' at database".format(vnfd_id, vnf_index)
+ step = "creating vnfr vnfd-id='{}' constituent-vnfd='{}' at database".format(
+ vnfd_id, vnf_index
+ )
self._add_vnfr_to_db(vnfr_descriptor, rollback, session)
nsr_descriptor["constituent-vnfr-ref"].append(vnfr_descriptor["id"])
self.fs.mkdir(nsr_id)
return nsr_id, None
- except (ValidationError, EngineException, DbException, MsgException, FsException) as e:
+ except (
+ ValidationError,
+ EngineException,
+ DbException,
+ MsgException,
+ FsException,
+ ) as e:
raise type(e)("{} while '{}'".format(e, step), http_code=e.http_code)
def _get_nsd_from_db(self, nsd_id, session):
_filter = self._get_project_filter(session)
_filter["id"] = vnfd_id
vnfd = self.db.get_one("vnfds", _filter, fail_on_empty=True, fail_on_more=True)
- vnfd.pop("_admin")
return vnfd
def _add_nsr_to_db(self, nsr_descriptor, rollback, session):
- self.format_on_new(nsr_descriptor, session["project_id"], make_public=session["public"])
+ self.format_on_new(
+ nsr_descriptor, session["project_id"], make_public=session["public"]
+ )
self.db.create("nsrs", nsr_descriptor)
rollback.append({"topic": "nsrs", "_id": nsr_descriptor["id"]})
def _add_vnfr_to_db(self, vnfr_descriptor, rollback, session):
- self.format_on_new(vnfr_descriptor, session["project_id"], make_public=session["public"])
+ self.format_on_new(
+ vnfr_descriptor, session["project_id"], make_public=session["public"]
+ )
self.db.create("vnfrs", vnfr_descriptor)
rollback.append({"topic": "vnfrs", "_id": vnfr_descriptor["id"]})
def _check_nsd_operational_state(self, nsd, ns_request):
if nsd["_admin"]["operationalState"] == "DISABLED":
- raise EngineException("nsd with id '{}' is DISABLED, and thus cannot be used to create "
- "a network service".format(ns_request["nsdId"]), http_code=HTTPStatus.CONFLICT)
+ raise EngineException(
+ "nsd with id '{}' is DISABLED, and thus cannot be used to create "
+ "a network service".format(ns_request["nsdId"]),
+ http_code=HTTPStatus.CONFLICT,
+ )
def _get_ns_k8s_namespace(self, nsd, ns_request, session):
- additional_params, _ = self._format_additional_params(ns_request, descriptor=nsd)
+ additional_params, _ = self._format_additional_params(
+ ns_request, descriptor=nsd
+ )
# use for k8s-namespace from ns_request or additionalParamsForNs. By default, the project_id
ns_k8s_namespace = session["project_id"][0] if session["project_id"] else None
if ns_request and ns_request.get("k8s-namespace"):
return ns_k8s_namespace
- def _create_nsr_descriptor_from_nsd(self, nsd, ns_request, nsr_id):
+ def _add_flavor_to_nsr(self, vdu, vnfd, nsr_descriptor):
+ flavor_data = {}
+ guest_epa = {}
+ # Find this vdu compute and storage descriptors
+ vdu_virtual_compute = {}
+ vdu_virtual_storage = {}
+ for vcd in vnfd.get("virtual-compute-desc", ()):
+ if vcd.get("id") == vdu.get("virtual-compute-desc"):
+ vdu_virtual_compute = vcd
+ for vsd in vnfd.get("virtual-storage-desc", ()):
+ if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
+ vdu_virtual_storage = vsd
+ # Get this vdu vcpus, memory and storage info for flavor_data
+ if vdu_virtual_compute.get("virtual-cpu", {}).get(
+ "num-virtual-cpu"
+ ):
+ flavor_data["vcpu-count"] = vdu_virtual_compute["virtual-cpu"][
+ "num-virtual-cpu"
+ ]
+ if vdu_virtual_compute.get("virtual-memory", {}).get("size"):
+ flavor_data["memory-mb"] = (
+ float(vdu_virtual_compute["virtual-memory"]["size"])
+ * 1024.0
+ )
+ if vdu_virtual_storage.get("size-of-storage"):
+ flavor_data["storage-gb"] = vdu_virtual_storage[
+ "size-of-storage"
+ ]
+ # Get this vdu EPA info for guest_epa
+ if vdu_virtual_compute.get("virtual-cpu", {}).get("cpu-quota"):
+ guest_epa["cpu-quota"] = vdu_virtual_compute["virtual-cpu"][
+ "cpu-quota"
+ ]
+ if vdu_virtual_compute.get("virtual-cpu", {}).get("pinning"):
+ vcpu_pinning = vdu_virtual_compute["virtual-cpu"]["pinning"]
+ if vcpu_pinning.get("thread-policy"):
+ guest_epa["cpu-thread-pinning-policy"] = vcpu_pinning[
+ "thread-policy"
+ ]
+ if vcpu_pinning.get("policy"):
+ cpu_policy = (
+ "SHARED"
+ if vcpu_pinning["policy"] == "dynamic"
+ else "DEDICATED"
+ )
+ guest_epa["cpu-pinning-policy"] = cpu_policy
+ if vdu_virtual_compute.get("virtual-memory", {}).get("mem-quota"):
+ guest_epa["mem-quota"] = vdu_virtual_compute["virtual-memory"][
+ "mem-quota"
+ ]
+ if vdu_virtual_compute.get("virtual-memory", {}).get(
+ "mempage-size"
+ ):
+ guest_epa["mempage-size"] = vdu_virtual_compute[
+ "virtual-memory"
+ ]["mempage-size"]
+ if vdu_virtual_compute.get("virtual-memory", {}).get(
+ "numa-node-policy"
+ ):
+ guest_epa["numa-node-policy"] = vdu_virtual_compute[
+ "virtual-memory"
+ ]["numa-node-policy"]
+ if vdu_virtual_storage.get("disk-io-quota"):
+ guest_epa["disk-io-quota"] = vdu_virtual_storage[
+ "disk-io-quota"
+ ]
+
+ if guest_epa:
+ flavor_data["guest-epa"] = guest_epa
+
+ flavor_data["name"] = vdu["id"][:56] + "-flv"
+ flavor_data["id"] = str(len(nsr_descriptor["flavor"]))
+ nsr_descriptor["flavor"].append(flavor_data)
+
+ def _create_nsr_descriptor_from_nsd(self, nsd, ns_request, nsr_id, session):
now = time()
- additional_params, _ = self._format_additional_params(ns_request, descriptor=nsd)
+ additional_params, _ = self._format_additional_params(
+ ns_request, descriptor=nsd
+ )
nsr_descriptor = {
"name": ns_request["nsName"],
"ssh-authorized-key": ns_request.get("ssh_keys"), # TODO remove
"flavor": [],
"image": [],
+ "affinity-or-anti-affinity-group": [],
}
+ if "revision" in nsd["_admin"]:
+ nsr_descriptor["revision"] = nsd["_admin"]["revision"]
+
ns_request["nsr_id"] = nsr_id
if ns_request and ns_request.get("config-units"):
nsr_descriptor["config-units"] = ns_request["config-units"]
-
# Create vld
if nsd.get("virtual-link-desc"):
nsr_vld = deepcopy(nsd.get("virtual-link-desc", []))
for vnf_profile in vnf_profiles:
for vlc in vnf_profile.get("virtual-link-connectivity", ()):
for cpd in vlc.get("constituent-cpd-id", ()):
- all_vld_connection_point_data[vlc.get("virtual-link-profile-id")].append({
- "member-vnf-index-ref": cpd.get("constituent-base-element-id"),
- "vnfd-connection-point-ref": cpd.get("constituent-cpd-id"),
- "vnfd-id-ref": vnf_profile.get("vnfd-id")
- })
-
- vnfd = self.db.get_one("vnfds",
- {"id": vnf_profile.get("vnfd-id")},
- fail_on_empty=True,
- fail_on_more=True)
+ all_vld_connection_point_data[
+ vlc.get("virtual-link-profile-id")
+ ].append(
+ {
+ "member-vnf-index-ref": cpd.get(
+ "constituent-base-element-id"
+ ),
+ "vnfd-connection-point-ref": cpd.get(
+ "constituent-cpd-id"
+ ),
+ "vnfd-id-ref": vnf_profile.get("vnfd-id"),
+ }
+ )
+
+ vnfd = self._get_vnfd_from_db(vnf_profile.get("vnfd-id"), session)
+ vnfd.pop("_admin")
for vdu in vnfd.get("vdu", ()):
- flavor_data = {}
- guest_epa = {}
- # Find this vdu compute and storage descriptors
- vdu_virtual_compute = {}
- vdu_virtual_storage = {}
- for vcd in vnfd.get("virtual-compute-desc", ()):
- if vcd.get("id") == vdu.get("virtual-compute-desc"):
- vdu_virtual_compute = vcd
- for vsd in vnfd.get("virtual-storage-desc", ()):
- if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
- vdu_virtual_storage = vsd
- # Get this vdu vcpus, memory and storage info for flavor_data
- if vdu_virtual_compute.get("virtual-cpu", {}).get("num-virtual-cpu"):
- flavor_data["vcpu-count"] = vdu_virtual_compute["virtual-cpu"]["num-virtual-cpu"]
- if vdu_virtual_compute.get("virtual-memory", {}).get("size"):
- flavor_data["memory-mb"] = float(vdu_virtual_compute["virtual-memory"]["size"]) * 1024.0
- if vdu_virtual_storage.get("size-of-storage"):
- flavor_data["storage-gb"] = vdu_virtual_storage["size-of-storage"]
- # Get this vdu EPA info for guest_epa
- if vdu_virtual_compute.get("virtual-cpu", {}).get("cpu-quota"):
- guest_epa["cpu-quota"] = vdu_virtual_compute["virtual-cpu"]["cpu-quota"]
- if vdu_virtual_compute.get("virtual-cpu", {}).get("pinning"):
- vcpu_pinning = vdu_virtual_compute["virtual-cpu"]["pinning"]
- if vcpu_pinning.get("thread-policy"):
- guest_epa["cpu-thread-pinning-policy"] = vcpu_pinning["thread-policy"]
- if vcpu_pinning.get("policy"):
- cpu_policy = "SHARED" if vcpu_pinning["policy"] == "dynamic" else "DEDICATED"
- guest_epa["cpu-pinning-policy"] = cpu_policy
- if vdu_virtual_compute.get("virtual-memory", {}).get("mem-quota"):
- guest_epa["mem-quota"] = vdu_virtual_compute["virtual-memory"]["mem-quota"]
- if vdu_virtual_compute.get("virtual-memory", {}).get("mempage-size"):
- guest_epa["mempage-size"] = vdu_virtual_compute["virtual-memory"]["mempage-size"]
- if vdu_virtual_compute.get("virtual-memory", {}).get("numa-node-policy"):
- guest_epa["numa-node-policy"] = vdu_virtual_compute["virtual-memory"]["numa-node-policy"]
- if vdu_virtual_storage.get("disk-io-quota"):
- guest_epa["disk-io-quota"] = vdu_virtual_storage["disk-io-quota"]
-
- if guest_epa:
- flavor_data["guest-epa"] = guest_epa
-
- flavor_data["name"] = vdu["id"][:56] + "-flv"
- flavor_data["id"] = str(len(nsr_descriptor["flavor"]))
- nsr_descriptor["flavor"].append(flavor_data)
-
+ self._add_flavor_to_nsr(vdu, vnfd, nsr_descriptor)
sw_image_id = vdu.get("sw-image-desc")
if sw_image_id:
- sw_image_desc = utils.find_in_list(vnfd.get("sw-image-desc", ()),
- lambda sw: sw["id"] == sw_image_id)
- image_data = {}
- if sw_image_desc.get("image"):
- image_data["image"] = sw_image_desc["image"]
- if sw_image_desc.get("checksum"):
- image_data["image_checksum"] = sw_image_desc["checksum"]["hash"]
- img = next((f for f in nsr_descriptor["image"] if
- all(f.get(k) == image_data[k] for k in image_data)), None)
- if not img:
- image_data["id"] = str(len(nsr_descriptor["image"]))
- nsr_descriptor["image"].append(image_data)
+ image_data = self._get_image_data_from_vnfd(vnfd, sw_image_id)
+ self._add_image_to_nsr(nsr_descriptor, image_data)
+
+ # also add alternative images to the list of images
+ for alt_image in vdu.get("alternative-sw-image-desc", ()):
+ image_data = self._get_image_data_from_vnfd(vnfd, alt_image)
+ self._add_image_to_nsr(nsr_descriptor, image_data)
+
+ # Add Affinity or Anti-affinity group information to NSR
+ vdu_profiles = vnfd.get("df", [[]])[0].get("vdu-profile", ())
+ affinity_group_prefix_name = "{}-{}".format(
+ nsr_descriptor["name"][:16], vnf_profile.get("id")[:16]
+ )
+
+ for vdu_profile in vdu_profiles:
+ affinity_group_data = {}
+ for affinity_group in vdu_profile.get(
+ "affinity-or-anti-affinity-group", ()
+ ):
+ affinity_group_data = (
+ self._get_affinity_or_anti_affinity_group_data_from_vnfd(
+ vnfd, affinity_group["id"]
+ )
+ )
+ affinity_group_data["member-vnf-index"] = vnf_profile.get("id")
+ self._add_affinity_or_anti_affinity_group_to_nsr(
+ nsr_descriptor,
+ affinity_group_data,
+ affinity_group_prefix_name,
+ )
for vld in nsr_vld:
- vld["vnfd-connection-point-ref"] = all_vld_connection_point_data.get(vld.get("id"), [])
+ vld["vnfd-connection-point-ref"] = all_vld_connection_point_data.get(
+ vld.get("id"), []
+ )
vld["name"] = vld["id"]
nsr_descriptor["vld"] = nsr_vld
return nsr_descriptor
- def _create_vnfr_descriptor_from_vnfd(self, nsd, vnfd, vnfd_id, vnf_index, nsr_descriptor,
- ns_request, ns_k8s_namespace):
+ def _get_affinity_or_anti_affinity_group_data_from_vnfd(
+ self, vnfd, affinity_group_id
+ ):
+ """
+ Gets affinity-or-anti-affinity-group info from df and returns the desired affinity group
+ """
+ affinity_group = utils.find_in_list(
+ vnfd.get("df", [[]])[0].get("affinity-or-anti-affinity-group", ()),
+ lambda ag: ag["id"] == affinity_group_id,
+ )
+ affinity_group_data = {}
+ if affinity_group:
+ if affinity_group.get("id"):
+ affinity_group_data["ag-id"] = affinity_group["id"]
+ if affinity_group.get("type"):
+ affinity_group_data["type"] = affinity_group["type"]
+ if affinity_group.get("scope"):
+ affinity_group_data["scope"] = affinity_group["scope"]
+ return affinity_group_data
+
+ def _add_affinity_or_anti_affinity_group_to_nsr(
+ self, nsr_descriptor, affinity_group_data, affinity_group_prefix_name
+ ):
+ """
+ Adds affinity-or-anti-affinity-group to nsr checking first it is not already added
+ """
+ affinity_group = next(
+ (
+ f
+ for f in nsr_descriptor["affinity-or-anti-affinity-group"]
+ if all(f.get(k) == affinity_group_data[k] for k in affinity_group_data)
+ ),
+ None,
+ )
+ if not affinity_group:
+ affinity_group_data["id"] = str(
+ len(nsr_descriptor["affinity-or-anti-affinity-group"])
+ )
+ affinity_group_data["name"] = "{}-{}".format(
+ affinity_group_prefix_name, affinity_group_data["ag-id"][:32]
+ )
+ nsr_descriptor["affinity-or-anti-affinity-group"].append(
+ affinity_group_data
+ )
+
+ def _get_image_data_from_vnfd(self, vnfd, sw_image_id):
+ sw_image_desc = utils.find_in_list(
+ vnfd.get("sw-image-desc", ()), lambda sw: sw["id"] == sw_image_id
+ )
+ image_data = {}
+ if sw_image_desc.get("image"):
+ image_data["image"] = sw_image_desc["image"]
+ if sw_image_desc.get("checksum"):
+ image_data["image_checksum"] = sw_image_desc["checksum"]["hash"]
+ if sw_image_desc.get("vim-type"):
+ image_data["vim-type"] = sw_image_desc["vim-type"]
+ return image_data
+
+ def _add_image_to_nsr(self, nsr_descriptor, image_data):
+ """
+ Adds image to nsr checking first it is not already added
+ """
+ img = next(
+ (
+ f
+ for f in nsr_descriptor["image"]
+ if all(f.get(k) == image_data[k] for k in image_data)
+ ),
+ None,
+ )
+ if not img:
+ image_data["id"] = str(len(nsr_descriptor["image"]))
+ nsr_descriptor["image"].append(image_data)
+
+ def _create_vnfr_descriptor_from_vnfd(
+ self,
+ nsd,
+ vnfd,
+ vnfd_id,
+ vnf_index,
+ nsr_descriptor,
+ ns_request,
+ ns_k8s_namespace,
+ ):
vnfr_id = str(uuid4())
nsr_id = nsr_descriptor["id"]
now = time()
- additional_params, vnf_params = self._format_additional_params(ns_request, vnf_index, descriptor=vnfd)
+ additional_params, vnf_params = self._format_additional_params(
+ ns_request, vnf_index, descriptor=vnfd
+ )
vnfr_descriptor = {
"id": vnfr_id,
"vnfd-ref": vnfd_id,
"vnfd-id": vnfd["_id"], # not at OSM model, but useful
"vim-account-id": None,
+ "vca-id": None,
"vdur": [],
"connection-point": [],
"ip-address": None, # mgmt-interface filled by LCM
}
+
+ # Revision backwards compatility. Only specify the revision in the record if
+ # the original VNFD has a revision.
+ if "revision" in vnfd:
+ vnfr_descriptor["revision"] = vnfd["revision"]
+
+
vnf_k8s_namespace = ns_k8s_namespace
if vnf_params:
if vnf_params.get("k8s-namespace"):
all_k8s_cluster_nets_cpds = {}
for cpd in get_iterable(vnfd.get("ext-cpd")):
if cpd.get("k8s-cluster-net"):
- all_k8s_cluster_nets_cpds[cpd.get("k8s-cluster-net")] = cpd.get("id")
+ all_k8s_cluster_nets_cpds[cpd.get("k8s-cluster-net")] = cpd.get(
+ "id"
+ )
for net in get_iterable(vnfr_descriptor["k8s-cluster"].get("nets")):
if net.get("id") in all_k8s_cluster_nets_cpds:
- net["external-connection-point-ref"] = all_k8s_cluster_nets_cpds[net.get("id")]
+ net["external-connection-point-ref"] = all_k8s_cluster_nets_cpds[
+ net.get("id")
+ ]
# update kdus
for kdu in get_iterable(vnfd.get("kdu")):
- additional_params, kdu_params = self._format_additional_params(ns_request,
- vnf_index,
- kdu_name=kdu["name"],
- descriptor=vnfd)
+ additional_params, kdu_params = self._format_additional_params(
+ ns_request, vnf_index, kdu_name=kdu["name"], descriptor=vnfd
+ )
kdu_k8s_namespace = vnf_k8s_namespace
kdu_model = kdu_params.get("kdu_model") if kdu_params else None
if kdu_params and kdu_params.get("k8s-namespace"):
kdu_k8s_namespace = kdu_params["k8s-namespace"]
+ kdu_deployment_name = ""
+ if kdu_params and kdu_params.get("kdu-deployment-name"):
+ kdu_deployment_name = kdu_params.get("kdu-deployment-name")
+
kdur = {
"additionalParams": additional_params,
"k8s-namespace": kdu_k8s_namespace,
+ "kdu-deployment-name": kdu_deployment_name,
"kdu-name": kdu["name"],
# TODO "name": "" Name of the VDU in the VIM
"ip-address": None, # mgmt-interface filled by LCM
vnfd_mgmt_cp = vnfd.get("mgmt-cp")
for vdu in vnfd.get("vdu", ()):
+ vdu_mgmt_cp = []
+ try:
+ configs = vnfd.get("df")[0]["lcm-operations-configuration"][
+ "operate-vnf-op-config"
+ ]["day1-2"]
+ vdu_config = utils.find_in_list(
+ configs, lambda config: config["id"] == vdu["id"]
+ )
+ except Exception:
+ vdu_config = None
+
+ try:
+ vdu_instantiation_level = utils.find_in_list(
+ vnfd.get("df")[0]["instantiation-level"][0]["vdu-level"],
+ lambda a_vdu_profile: a_vdu_profile["vdu-id"] == vdu["id"],
+ )
+ except Exception:
+ vdu_instantiation_level = None
+
+ if vdu_config:
+ external_connection_ee = utils.filter_in_list(
+ vdu_config.get("execution-environment-list", []),
+ lambda ee: "external-connection-point-ref" in ee,
+ )
+ for ee in external_connection_ee:
+ vdu_mgmt_cp.append(ee["external-connection-point-ref"])
+
additional_params, vdu_params = self._format_additional_params(
- ns_request, vnf_index, vdu_id=vdu["id"], descriptor=vnfd)
+ ns_request, vnf_index, vdu_id=vdu["id"], descriptor=vnfd
+ )
+
+ try:
+ vdu_virtual_storage_descriptors = utils.filter_in_list(
+ vnfd.get("virtual-storage-desc", []),
+ lambda stg_desc: stg_desc["id"] in vdu["virtual-storage-desc"]
+ )
+ except Exception:
+ vdu_virtual_storage_descriptors = []
vdur = {
"vdu-id-ref": vdu["id"],
# TODO "name": "" Name of the VDU in the VIM
"internal-connection-point": [],
"interfaces": [],
"additionalParams": additional_params,
- "vdu-name": vdu["name"]
+ "vdu-name": vdu["name"],
+ "virtual-storages": vdu_virtual_storage_descriptors
}
if vdu_params and vdu_params.get("config-units"):
vdur["config-units"] = vdu_params["config-units"]
if deep_get(vdu, ("supplemental-boot-data", "boot-data-drive")):
- vdur["boot-data-drive"] = vdu["supplemental-boot-data"]["boot-data-drive"]
+ vdur["boot-data-drive"] = vdu["supplemental-boot-data"][
+ "boot-data-drive"
+ ]
if vdu.get("pdu-type"):
vdur["pdu-type"] = vdu["pdu-type"]
vdur["name"] = vdu["pdu-type"]
"name": icp.get("id"),
}
- if "port-security-enabled" in icp:
- vdu_icp["port-security-enabled"] = icp["port-security-enabled"]
-
- if "port-security-disable-strategy" in icp:
- vdu_icp["port-security-disable-strategy"] = icp["port-security-disable-strategy"]
-
vdur["internal-connection-point"].append(vdu_icp)
for iface in icp.get("virtual-network-interface-requirement", ()):
- iface_fields = ("name", "mac-address")
- vdu_iface = {x: iface[x] for x in iface_fields if iface.get(x) is not None}
+ # Name, mac-address and interface position is taken from VNFD
+ # and included into VNFR. By this way RO can process this information
+ # while creating the VDU.
+ iface_fields = ("name", "mac-address", "position")
+ vdu_iface = {
+ x: iface[x] for x in iface_fields if iface.get(x) is not None
+ }
vdu_iface["internal-connection-point-ref"] = vdu_icp["id"]
+ if "port-security-enabled" in icp:
+ vdu_iface["port-security-enabled"] = icp[
+ "port-security-enabled"
+ ]
+
+ if "port-security-disable-strategy" in icp:
+ vdu_iface["port-security-disable-strategy"] = icp[
+ "port-security-disable-strategy"
+ ]
+
for ext_cp in vnfd.get("ext-cpd", ()):
if not ext_cp.get("int-cpd"):
continue
if ext_cp["int-cpd"].get("vdu-id") != vdu["id"]:
continue
if icp["id"] == ext_cp["int-cpd"].get("cpd"):
- vdu_iface["external-connection-point-ref"] = ext_cp.get("id")
+ vdu_iface["external-connection-point-ref"] = ext_cp.get(
+ "id"
+ )
+
+ if "port-security-enabled" in ext_cp:
+ vdu_iface["port-security-enabled"] = ext_cp[
+ "port-security-enabled"
+ ]
+
+ if "port-security-disable-strategy" in ext_cp:
+ vdu_iface["port-security-disable-strategy"] = ext_cp[
+ "port-security-disable-strategy"
+ ]
+
break
- if vnfd_mgmt_cp and vdu_iface.get("external-connection-point-ref") == vnfd_mgmt_cp:
+ if (
+ vnfd_mgmt_cp
+ and vdu_iface.get("external-connection-point-ref")
+ == vnfd_mgmt_cp
+ ):
vdu_iface["mgmt-vnf"] = True
- vdu_iface["mgmt-interface"] = True # TODO change to mgmt-vdu
+ vdu_iface["mgmt-interface"] = True
+
+ for ecp in vdu_mgmt_cp:
+ if vdu_iface.get("external-connection-point-ref") == ecp:
+ vdu_iface["mgmt-interface"] = True
if iface.get("virtual-interface"):
vdu_iface.update(deepcopy(iface["virtual-interface"]))
# TODO: Change for multiple df support
for df in get_iterable(nsd.get("df")):
for vnf_profile in get_iterable(df.get("vnf-profile")):
- for vlc_index, vlc in \
- enumerate(get_iterable(vnf_profile.get("virtual-link-connectivity"))):
- for cpd in get_iterable(vlc.get("constituent-cpd-id")):
- if cpd.get("constituent-cpd-id") == iface_ext_cp:
- vdu_iface["ns-vld-id"] = vlc.get("virtual-link-profile-id")
+ for vlc_index, vlc in enumerate(
+ get_iterable(
+ vnf_profile.get("virtual-link-connectivity")
+ )
+ ):
+ for cpd in get_iterable(
+ vlc.get("constituent-cpd-id")
+ ):
+ if (
+ cpd.get("constituent-cpd-id")
+ == iface_ext_cp
+ ):
+ vdu_iface["ns-vld-id"] = vlc.get(
+ "virtual-link-profile-id"
+ )
# if iface type is SRIOV or PASSTHROUGH, set pci-interfaces flag to True
- if vdu_iface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
- nsr_descriptor["vld"][vlc_index]["pci-interfaces"] = True
+ if vdu_iface.get("type") in (
+ "SR-IOV",
+ "PCI-PASSTHROUGH",
+ ):
+ nsr_descriptor["vld"][vlc_index][
+ "pci-interfaces"
+ ] = True
break
elif vdu_iface.get("internal-connection-point-ref"):
vdu_iface["vnf-vld-id"] = icp.get("int-virtual-link-desc")
# TODO: store fixed IP address in the record (if it exists in the ICP)
# if iface type is SRIOV or PASSTHROUGH, set pci-interfaces flag to True
if vdu_iface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
- ivld_index = utils.find_index_in_list(vnfd.get("int-virtual-link-desc", ()),
- lambda ivld:
- ivld["id"] == icp.get("int-virtual-link-desc")
- )
+ ivld_index = utils.find_index_in_list(
+ vnfd.get("int-virtual-link-desc", ()),
+ lambda ivld: ivld["id"]
+ == icp.get("int-virtual-link-desc"),
+ )
vnfr_descriptor["vld"][ivld_index]["pci-interfaces"] = True
vdur["interfaces"].append(vdu_iface)
if vdu.get("sw-image-desc"):
sw_image = utils.find_in_list(
vnfd.get("sw-image-desc", ()),
- lambda image: image["id"] == vdu.get("sw-image-desc"))
+ lambda image: image["id"] == vdu.get("sw-image-desc"),
+ )
nsr_sw_image_data = utils.find_in_list(
nsr_descriptor["image"],
- lambda nsr_image: (nsr_image.get("image") == sw_image.get("image"))
+ lambda nsr_image: (nsr_image.get("image") == sw_image.get("image")),
)
vdur["ns-image-id"] = nsr_sw_image_data["id"]
+ if vdu.get("alternative-sw-image-desc"):
+ alt_image_ids = []
+ for alt_image_id in vdu.get("alternative-sw-image-desc", ()):
+ sw_image = utils.find_in_list(
+ vnfd.get("sw-image-desc", ()),
+ lambda image: image["id"] == alt_image_id,
+ )
+ nsr_sw_image_data = utils.find_in_list(
+ nsr_descriptor["image"],
+ lambda nsr_image: (
+ nsr_image.get("image") == sw_image.get("image")
+ ),
+ )
+ alt_image_ids.append(nsr_sw_image_data["id"])
+ vdur["alt-image-ids"] = alt_image_ids
+
flavor_data_name = vdu["id"][:56] + "-flv"
nsr_flavor_desc = utils.find_in_list(
nsr_descriptor["flavor"],
- lambda flavor: flavor["name"] == flavor_data_name)
+ lambda flavor: flavor["name"] == flavor_data_name,
+ )
if nsr_flavor_desc:
vdur["ns-flavor-id"] = nsr_flavor_desc["id"]
- count = int(vdu.get("count", 1))
+ # Adding Affinity groups information to vdur
+ try:
+ vdu_profile_affinity_group = utils.find_in_list(
+ vnfd.get("df")[0]["vdu-profile"],
+ lambda a_vdu: a_vdu["id"] == vdu["id"],
+ )
+ except Exception:
+ vdu_profile_affinity_group = None
+
+ if vdu_profile_affinity_group:
+ affinity_group_ids = []
+ for affinity_group in vdu_profile_affinity_group.get(
+ "affinity-or-anti-affinity-group", ()
+ ):
+ vdu_affinity_group = utils.find_in_list(
+ vdu_profile_affinity_group.get(
+ "affinity-or-anti-affinity-group", ()
+ ),
+ lambda ag_fp: ag_fp["id"] == affinity_group["id"],
+ )
+ nsr_affinity_group = utils.find_in_list(
+ nsr_descriptor["affinity-or-anti-affinity-group"],
+ lambda nsr_ag: (
+ nsr_ag.get("ag-id") == vdu_affinity_group.get("id")
+ and nsr_ag.get("member-vnf-index")
+ == vnfr_descriptor.get("member-vnf-index-ref")
+ ),
+ )
+ # Update Affinity Group VIM name if VDU instantiation parameter is present
+ if vnf_params and vnf_params.get("affinity-or-anti-affinity-group"):
+ vnf_params_affinity_group = utils.find_in_list(
+ vnf_params["affinity-or-anti-affinity-group"],
+ lambda vnfp_ag: (
+ vnfp_ag.get("id") == vdu_affinity_group.get("id")
+ ),
+ )
+ if vnf_params_affinity_group.get("vim-affinity-group-id"):
+ nsr_affinity_group[
+ "vim-affinity-group-id"
+ ] = vnf_params_affinity_group["vim-affinity-group-id"]
+ affinity_group_ids.append(nsr_affinity_group["id"])
+ vdur["affinity-or-anti-affinity-group-id"] = affinity_group_ids
+
+ if vdu_instantiation_level:
+ count = vdu_instantiation_level.get("number-of-instances")
+ else:
+ count = 1
+
for index in range(0, count):
vdur = deepcopy(vdur)
for iface in vdur["interfaces"]:
- if iface.get("ip-address"):
+ if iface.get("ip-address") and index != 0:
iface["ip-address"] = increment_ip_mac(iface["ip-address"])
- if iface.get("mac-address"):
+ if iface.get("mac-address") and index != 0:
iface["mac-address"] = increment_ip_mac(iface["mac-address"])
vdur["_id"] = str(uuid4())
return vnfr_descriptor
+ def vca_status_refresh(self, session, ns_instance_content, filter_q):
+ """
+ vcaStatus in ns_instance_content maybe stale, check if it is stale and create lcm op
+ to refresh vca status by sending message to LCM when it is stale. Ignore otherwise.
+ :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
+ :param ns_instance_content: ns instance content
+ :param filter_q: dict: query parameter containing vcaStatus-refresh as true or false
+ :return: None
+ """
+ time_now, time_delta = time(), time() - ns_instance_content["_admin"]["modified"]
+ force_refresh = isinstance(filter_q, dict) and filter_q.get('vcaStatusRefresh') == 'true'
+ threshold_reached = time_delta > 120
+ if force_refresh or threshold_reached:
+ operation, _id = "vca_status_refresh", ns_instance_content["_id"]
+ ns_instance_content["_admin"]["modified"] = time_now
+ self.db.set_one(self.topic, {"_id": _id}, ns_instance_content)
+ nslcmop_desc = NsLcmOpTopic._create_nslcmop(_id, operation, None)
+ self.format_on_new(nslcmop_desc, session["project_id"], make_public=session["public"])
+ nslcmop_desc["_admin"].pop("nsState")
+ self.msg.write("ns", operation, nslcmop_desc)
+ return
+
+ def show(self, session, _id, filter_q=None, api_req=False):
+ """
+ Get complete information on an ns instance.
+ :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
+ :param _id: string, ns instance id
+ :param filter_q: dict: query parameter containing vcaStatusRefresh as true or false
+ :param api_req: True if this call is serving an external API request. False if serving internal request.
+ :return: dictionary, raise exception if not found.
+ """
+ ns_instance_content = super().show(session, _id, api_req)
+ self.vca_status_refresh(session, ns_instance_content, filter_q)
+ return ns_instance_content
+
def edit(self, session, _id, indata=None, kwargs=None, content=None):
- raise EngineException("Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR)
+ raise EngineException(
+ "Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR
+ )
class VnfrTopic(BaseTopic):
BaseTopic.__init__(self, db, fs, msg, auth)
def delete(self, session, _id, dry_run=False, not_send_msg=None):
- raise EngineException("Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR)
+ raise EngineException(
+ "Method delete called directly", HTTPStatus.INTERNAL_SERVER_ERROR
+ )
def edit(self, session, _id, indata=None, kwargs=None, content=None):
- raise EngineException("Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR)
+ raise EngineException(
+ "Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR
+ )
def new(self, rollback, session, indata=None, kwargs=None, headers=None):
# Not used because vnfrs are created and deleted by NsrTopic class directly
- raise EngineException("Method new called directly", HTTPStatus.INTERNAL_SERVER_ERROR)
+ raise EngineException(
+ "Method new called directly", HTTPStatus.INTERNAL_SERVER_ERROR
+ )
class NsLcmOpTopic(BaseTopic):
topic = "nslcmops"
topic_msg = "ns"
- operation_schema = { # mapping between operation and jsonschema to validate
+ operation_schema = { # mapping between operation and jsonschema to validate
"instantiate": ns_instantiate,
"action": ns_action,
+ "update": ns_update,
"scale": ns_scale,
+ "heal": ns_heal,
"terminate": ns_terminate,
+ "migrate": ns_migrate,
+ "verticalscale": ns_verticalscale,
}
    def __init__(self, db, fs, msg, auth):
        # Delegate the common topic wiring (db, fs, msg, auth) to the base class
        BaseTopic.__init__(self, db, fs, msg, auth)
        # Keep an NsrTopic at hand so lcm operations can read/update ns records
        self.nsrtopic = NsrTopic(db, fs, msg, auth)
def _check_ns_operation(self, session, nsr, operation, indata):
"""
Check that user has enter right parameters for the operation
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
- :param operation: it can be: instantiate, terminate, action, TODO: update, heal
+ :param operation: it can be: instantiate, terminate, action, update, heal
:param indata: descriptor with the parameters of the operation
:return: None
"""
self._check_action_ns_operation(indata, nsr)
elif operation == "scale":
self._check_scale_ns_operation(indata, nsr)
+ elif operation == "update":
+ self._check_update_ns_operation(indata, nsr)
+ elif operation == "heal":
+ self._check_heal_ns_operation(indata, nsr)
elif operation == "instantiate":
self._check_instantiate_ns_operation(indata, nsr, session)
nsd = nsr["nsd"]
# check vnf_member_index
if indata.get("vnf_member_index"):
- indata["member_vnf_index"] = indata.pop("vnf_member_index") # for backward compatibility
+ indata["member_vnf_index"] = indata.pop(
+ "vnf_member_index"
+ ) # for backward compatibility
if indata.get("member_vnf_index"):
- vnfd = self._get_vnfd_from_vnf_member_index(indata["member_vnf_index"], nsr["_id"])
+ vnfd = self._get_vnfd_from_vnf_member_index(
+ indata["member_vnf_index"], nsr["_id"]
+ )
try:
- configs = vnfd.get("df")[0]["lcm-operations-configuration"]["operate-vnf-op-config"]["day1-2"]
+ configs = vnfd.get("df")[0]["lcm-operations-configuration"][
+ "operate-vnf-op-config"
+ ]["day1-2"]
except Exception:
configs = []
if indata.get("vdu_id"):
self._check_valid_vdu(vnfd, indata["vdu_id"])
descriptor_configuration = utils.find_in_list(
- configs,
- lambda config: config["id"] == indata["vdu_id"]
- ).get("config-primitive")
+ configs, lambda config: config["id"] == indata["vdu_id"]
+ )
elif indata.get("kdu_name"):
self._check_valid_kdu(vnfd, indata["kdu_name"])
descriptor_configuration = utils.find_in_list(
- configs,
- lambda config: config["id"] == indata.get("kdu_name")
- ).get("config-primitive")
+ configs, lambda config: config["id"] == indata.get("kdu_name")
+ )
else:
descriptor_configuration = utils.find_in_list(
- configs,
- lambda config: config["id"] == vnfd["id"]
- ).get("config-primitive")
+ configs, lambda config: config["id"] == vnfd["id"]
+ )
+ if descriptor_configuration is not None:
+ descriptor_configuration = descriptor_configuration.get(
+ "config-primitive"
+ )
else: # use a NSD
- descriptor_configuration = nsd.get("ns-configuration", {}).get("config-primitive")
+ descriptor_configuration = nsd.get("ns-configuration", {}).get(
+ "config-primitive"
+ )
# For k8s allows default primitives without validating the parameters
- if indata.get("kdu_name") and indata["primitive"] in ("upgrade", "rollback", "status", "inspect", "readme"):
+ if indata.get("kdu_name") and indata["primitive"] in (
+ "upgrade",
+ "rollback",
+ "status",
+ "inspect",
+ "readme",
+ ):
# TODO should be checked that rollback only can contains revsision_numbe????
if not indata.get("member_vnf_index"):
- raise EngineException("Missing action parameter 'member_vnf_index' for default KDU primitive '{}'"
- .format(indata["primitive"]))
+ raise EngineException(
+ "Missing action parameter 'member_vnf_index' for default KDU primitive '{}'".format(
+ indata["primitive"]
+ )
+ )
return
# if not, check primitive
for config_primitive in get_iterable(descriptor_configuration):
if paramd["name"] in in_primitive_params_copy:
del in_primitive_params_copy[paramd["name"]]
elif not paramd.get("default-value"):
- raise EngineException("Needed parameter {} not provided for primitive '{}'".format(
- paramd["name"], indata["primitive"]))
+ raise EngineException(
+ "Needed parameter {} not provided for primitive '{}'".format(
+ paramd["name"], indata["primitive"]
+ )
+ )
# check no extra primitive params are provided
if in_primitive_params_copy:
- raise EngineException("parameter/s '{}' not present at vnfd /nsd for primitive '{}'".format(
- list(in_primitive_params_copy.keys()), indata["primitive"]))
+ raise EngineException(
+ "parameter/s '{}' not present at vnfd /nsd for primitive '{}'".format(
+ list(in_primitive_params_copy.keys()), indata["primitive"]
+ )
+ )
break
else:
- raise EngineException("Invalid primitive '{}' is not present at vnfd/nsd".format(indata["primitive"]))
+ raise EngineException(
+ "Invalid primitive '{}' is not present at vnfd/nsd".format(
+ indata["primitive"]
+ )
+ )
+
+ def _check_update_ns_operation(self, indata, nsr) -> None:
+ """Validates the ns-update request according to updateType
+
+ If updateType is CHANGE_VNFPKG:
+ - it checks the vnfInstanceId, whether it's available under ns instance
+ - it checks the vnfdId whether it matches with the vnfd-id in the vnf-record of specified VNF.
+ Otherwise exception will be raised.
+ If updateType is REMOVE_VNF:
+ - it checks if the vnfInstanceId is available in the ns instance
+ - Otherwise exception will be raised.
+
+ Args:
+ indata: includes updateType such as CHANGE_VNFPKG,
+ nsr: network service record
+
+ Raises:
+ EngineException:
+ a meaningful error if given update parameters are not proper such as
+ "Error in validating ns-update request: <ID> does not match
+ with the vnfd-id of vnfinstance
+ http_code=HTTPStatus.UNPROCESSABLE_ENTITY"
+
+ """
+ try:
+ if indata["updateType"] == "CHANGE_VNFPKG":
+ # vnfInstanceId, nsInstanceId, vnfdId are mandatory
+ vnf_instance_id = indata["changeVnfPackageData"]["vnfInstanceId"]
+ ns_instance_id = indata["nsInstanceId"]
+ vnfd_id_2update = indata["changeVnfPackageData"]["vnfdId"]
+
+ if vnf_instance_id not in nsr["constituent-vnfr-ref"]:
+
+ raise EngineException(
+ f"Error in validating ns-update request: vnf {vnf_instance_id} does not "
+ f"belong to NS {ns_instance_id}",
+ http_code=HTTPStatus.UNPROCESSABLE_ENTITY,
+ )
+
+ # Getting vnfrs through the ns_instance_id
+ vnfrs = self.db.get_list("vnfrs", {"nsr-id-ref": ns_instance_id})
+ constituent_vnfd_id = next(
+ (
+ vnfr["vnfd-id"]
+ for vnfr in vnfrs
+ if vnfr["id"] == vnf_instance_id
+ ),
+ None,
+ )
+
+ # Check the given vnfd-id belongs to given vnf instance
+ if constituent_vnfd_id and (vnfd_id_2update != constituent_vnfd_id):
+
+ raise EngineException(
+ f"Error in validating ns-update request: vnfd-id {vnfd_id_2update} does not "
+ f"match with the vnfd-id: {constituent_vnfd_id} of VNF instance: {vnf_instance_id}",
+ http_code=HTTPStatus.UNPROCESSABLE_ENTITY,
+ )
+
+ # Validating the ns update timeout
+ if (
+ indata.get("timeout_ns_update")
+ and indata["timeout_ns_update"] < 300
+ ):
+ raise EngineException(
+ "Error in validating ns-update request: {} second is not enough "
+ "to upgrade the VNF instance: {}".format(
+ indata["timeout_ns_update"], vnf_instance_id
+ ),
+ http_code=HTTPStatus.UNPROCESSABLE_ENTITY,
+ )
+ elif indata["updateType"] == "REMOVE_VNF":
+ vnf_instance_id = indata["removeVnfInstanceId"]
+ ns_instance_id = indata["nsInstanceId"]
+ if vnf_instance_id not in nsr["constituent-vnfr-ref"]:
+ raise EngineException(
+ "Invalid VNF Instance Id. '{}' is not "
+ "present in the NS '{}'".format(vnf_instance_id, ns_instance_id)
+ )
+
+ except (
+ DbException,
+ AttributeError,
+ IndexError,
+ KeyError,
+ ValueError,
+ ) as e:
+ raise type(e)(
+ "Ns update request could not be processed with error: {}.".format(e)
+ )
def _check_scale_ns_operation(self, indata, nsr):
- vnfd = self._get_vnfd_from_vnf_member_index(indata["scaleVnfData"]["scaleByStepData"]["member-vnf-index"],
- nsr["_id"])
+ vnfd = self._get_vnfd_from_vnf_member_index(
+ indata["scaleVnfData"]["scaleByStepData"]["member-vnf-index"], nsr["_id"]
+ )
for scaling_aspect in get_iterable(vnfd.get("df", ())[0]["scaling-aspect"]):
- if indata["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"] == scaling_aspect["id"]:
+ if (
+ indata["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"]
+ == scaling_aspect["id"]
+ ):
break
else:
- raise EngineException("Invalid scaleVnfData:scaleByStepData:scaling-group-descriptor '{}' is not "
- "present at vnfd:scaling-aspect"
- .format(indata["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"]))
+ raise EngineException(
+ "Invalid scaleVnfData:scaleByStepData:scaling-group-descriptor '{}' is not "
+ "present at vnfd:scaling-aspect".format(
+ indata["scaleVnfData"]["scaleByStepData"][
+ "scaling-group-descriptor"
+ ]
+ )
+ )
+
+ def _check_heal_ns_operation(self, indata, nsr):
+ return
def _check_instantiate_ns_operation(self, indata, nsr, session):
vnf_member_index_to_vnfd = {} # map between vnf_member_index to vnf descriptor.
if vnf_member_index_to_vnfd.get(member_vnf_index):
vnfd = vnf_member_index_to_vnfd[member_vnf_index]
else:
- vnfd = self._get_vnfd_from_vnf_member_index(member_vnf_index, nsr["_id"])
- vnf_member_index_to_vnfd[member_vnf_index] = vnfd # add to cache, avoiding a later look for
+ vnfd = self._get_vnfd_from_vnf_member_index(
+ member_vnf_index, nsr["_id"]
+ )
+ vnf_member_index_to_vnfd[
+ member_vnf_index
+ ] = vnfd # add to cache, avoiding a later look for
self._check_vnf_instantiation_params(in_vnf, vnfd)
if in_vnf.get("vimAccountId"):
- self._check_valid_vim_account(in_vnf["vimAccountId"], vim_accounts, session)
+ self._check_valid_vim_account(
+ in_vnf["vimAccountId"], vim_accounts, session
+ )
for in_vld in get_iterable(indata.get("vld")):
- self._check_valid_wim_account(in_vld.get("wimAccountId"), wim_accounts, session)
+ self._check_valid_wim_account(
+ in_vld.get("wimAccountId"), wim_accounts, session
+ )
for vldd in get_iterable(nsd.get("virtual-link-desc")):
if in_vld["name"] == vldd["id"]:
break
else:
- raise EngineException("Invalid parameter vld:name='{}' is not present at nsd:vld".format(
- in_vld["name"]))
+ raise EngineException(
+ "Invalid parameter vld:name='{}' is not present at nsd:vld".format(
+ in_vld["name"]
+ )
+ )
def _get_vnfd_from_vnf_member_index(self, member_vnf_index, nsr_id):
# Obtain vnf descriptor. The vnfr is used to get the vnfd._id used for this member_vnf_index
- vnfr = self.db.get_one("vnfrs",
- {"nsr-id-ref": nsr_id, "member-vnf-index-ref": member_vnf_index},
- fail_on_empty=False)
+ vnfr = self.db.get_one(
+ "vnfrs",
+ {"nsr-id-ref": nsr_id, "member-vnf-index-ref": member_vnf_index},
+ fail_on_empty=False,
+ )
if not vnfr:
- raise EngineException("Invalid parameter member_vnf_index='{}' is not one of the "
- "nsd:constituent-vnfd".format(member_vnf_index))
- vnfd = self.db.get_one("vnfds", {"_id": vnfr["vnfd-id"]}, fail_on_empty=False)
+ raise EngineException(
+ "Invalid parameter member_vnf_index='{}' is not one of the "
+ "nsd:constituent-vnfd".format(member_vnf_index)
+ )
+
+ ## Backwards compatibility: if there is no revision, get it from the one and only VNFD entry
+ if "revision" in vnfr:
+ vnfd_revision = vnfr["vnfd-id"] + ":" + str(vnfr["revision"])
+ vnfd = self.db.get_one("vnfds_revisions", {"_id": vnfd_revision}, fail_on_empty=False)
+ else:
+ vnfd = self.db.get_one("vnfds", {"_id": vnfr["vnfd-id"]}, fail_on_empty=False)
+
if not vnfd:
- raise EngineException("vnfd id={} has been deleted!. Operation cannot be performed".
- format(vnfr["vnfd-id"]))
+ raise EngineException(
+ "vnfd id={} has been deleted!. Operation cannot be performed".format(
+ vnfr["vnfd-id"]
+ )
+ )
return vnfd
def _check_valid_vdu(self, vnfd, vdu_id):
if vdud["id"] == vdu_id:
return vdud
else:
- raise EngineException("Invalid parameter vdu_id='{}' not present at vnfd:vdu:id".format(vdu_id))
+ raise EngineException(
+ "Invalid parameter vdu_id='{}' not present at vnfd:vdu:id".format(
+ vdu_id
+ )
+ )
def _check_valid_kdu(self, vnfd, kdu_name):
for kdud in get_iterable(vnfd.get("kdu")):
if kdud["name"] == kdu_name:
return kdud
else:
- raise EngineException("Invalid parameter kdu_name='{}' not present at vnfd:kdu:name".format(kdu_name))
+ raise EngineException(
+ "Invalid parameter kdu_name='{}' not present at vnfd:kdu:name".format(
+ kdu_name
+ )
+ )
def _check_vnf_instantiation_params(self, in_vnf, vnfd):
for in_vdu in get_iterable(in_vnf.get("vdu")):
if in_vdu["id"] == vdu["id"]:
for volume in get_iterable(in_vdu.get("volume")):
for volumed in get_iterable(vdu.get("virtual-storage-desc")):
- if volumed["id"] == volume["name"]:
+ if volumed == volume["name"]:
break
else:
- raise EngineException("Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}']:"
- "volume:name='{}' is not present at "
- "vnfd:vdu:virtual-storage-desc list".
- format(in_vnf["member-vnf-index"], in_vdu["id"],
- volume["id"]))
+ raise EngineException(
+ "Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}']:"
+ "volume:name='{}' is not present at "
+ "vnfd:vdu:virtual-storage-desc list".format(
+ in_vnf["member-vnf-index"],
+ in_vdu["id"],
+ volume["id"],
+ )
+ )
vdu_if_names = set()
for cpd in get_iterable(vdu.get("int-cpd")):
- for iface in get_iterable(cpd.get("virtual-network-interface-requirement")):
+ for iface in get_iterable(
+ cpd.get("virtual-network-interface-requirement")
+ ):
vdu_if_names.add(iface.get("name"))
- for in_iface in get_iterable(in_vdu["interface"]):
+ for in_iface in get_iterable(in_vdu.get("interface")):
if in_iface["name"] in vdu_if_names:
break
else:
- raise EngineException("Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}']:"
- "int-cpd[id='{}'] is not present at vnfd:vdu:int-cpd"
- .format(in_vnf["member-vnf-index"], in_vdu["id"],
- in_iface["name"]))
+ raise EngineException(
+ "Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}']:"
+ "int-cpd[id='{}'] is not present at vnfd:vdu:int-cpd".format(
+ in_vnf["member-vnf-index"],
+ in_vdu["id"],
+ in_iface["name"],
+ )
+ )
break
else:
- raise EngineException("Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}'] is not present "
- "at vnfd:vdu".format(in_vnf["member-vnf-index"], in_vdu["id"]))
+ raise EngineException(
+ "Invalid parameter vnf[member-vnf-index='{}']:vdu[id='{}'] is not present "
+ "at vnfd:vdu".format(in_vnf["member-vnf-index"], in_vdu["id"])
+ )
- vnfd_ivlds_cpds = {ivld.get("id"): set() for ivld in get_iterable(vnfd.get("int-virtual-link-desc"))}
+ vnfd_ivlds_cpds = {
+ ivld.get("id"): set()
+ for ivld in get_iterable(vnfd.get("int-virtual-link-desc"))
+ }
for vdu in get_iterable(vnfd.get("vdu")):
for cpd in get_iterable(vnfd.get("int-cpd")):
if cpd.get("int-virtual-link-desc"):
if in_icp["id-ref"] in vnfd_ivlds_cpds[in_ivld.get("name")]:
break
else:
- raise EngineException("Invalid parameter vnf[member-vnf-index='{}']:internal-vld[name"
- "='{}']:internal-connection-point[id-ref:'{}'] is not present at "
- "vnfd:internal-vld:name/id:internal-connection-point"
- .format(in_vnf["member-vnf-index"], in_ivld["name"],
- in_icp["id-ref"]))
+ raise EngineException(
+ "Invalid parameter vnf[member-vnf-index='{}']:internal-vld[name"
+ "='{}']:internal-connection-point[id-ref:'{}'] is not present at "
+ "vnfd:internal-vld:name/id:internal-connection-point".format(
+ in_vnf["member-vnf-index"],
+ in_ivld["name"],
+ in_icp["id-ref"],
+ )
+ )
else:
- raise EngineException("Invalid parameter vnf[member-vnf-index='{}']:internal-vld:name='{}'"
- " is not present at vnfd '{}'".format(in_vnf["member-vnf-index"],
- in_ivld["name"], vnfd["id"]))
+ raise EngineException(
+ "Invalid parameter vnf[member-vnf-index='{}']:internal-vld:name='{}'"
+ " is not present at vnfd '{}'".format(
+ in_vnf["member-vnf-index"], in_ivld["name"], vnfd["id"]
+ )
+ )
def _check_valid_vim_account(self, vim_account, vim_accounts, session):
if vim_account in vim_accounts:
db_filter["_id"] = vim_account
self.db.get_one("vim_accounts", db_filter)
except Exception:
- raise EngineException("Invalid vimAccountId='{}' not present for the project".format(vim_account))
+ raise EngineException(
+ "Invalid vimAccountId='{}' not present for the project".format(
+ vim_account
+ )
+ )
vim_accounts.append(vim_account)
+ def _get_vim_account(self, vim_id: str, session):
+ try:
+ db_filter = self._get_project_filter(session)
+ db_filter["_id"] = vim_id
+ return self.db.get_one("vim_accounts", db_filter)
+ except Exception:
+ raise EngineException(
+ "Invalid vimAccountId='{}' not present for the project".format(
+ vim_id
+ )
+ )
+
def _check_valid_wim_account(self, wim_account, wim_accounts, session):
if not isinstance(wim_account, str):
return
db_filter["_id"] = wim_account
self.db.get_one("wim_accounts", db_filter)
except Exception:
- raise EngineException("Invalid wimAccountId='{}' not present for the project".format(wim_account))
+ raise EngineException(
+ "Invalid wimAccountId='{}' not present for the project".format(
+ wim_account
+ )
+ )
wim_accounts.append(wim_account)
- def _look_for_pdu(self, session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback):
+ def _look_for_pdu(
+ self, session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback
+ ):
"""
Look for a free PDU in the catalog matching vdur type and interfaces. Fills vnfr.vdur with the interface
(ip_address, ...) information.
else:
raise EngineException(
"No PDU of type={} at vim_account={} found for member_vnf_index={}, vdu={} matching interface "
- "names".format(pdu_type, vim_account, vnfr["member-vnf-index-ref"], vdur["vdu-id-ref"]))
+ "names".format(
+ pdu_type,
+ vim_account,
+ vnfr["member-vnf-index-ref"],
+ vdur["vdu-id-ref"],
+ )
+ )
# step 2. Update pdu
rollback_pdu = {
"_admin.usage.nsr_id": None,
"_admin.usage.vdur": None,
}
- self.db.set_one("pdus", {"_id": pdu["_id"]},
- {"_admin.usageState": "IN_USE",
- "_admin.usage": {"vnfr_id": vnfr["_id"],
- "nsr_id": vnfr["nsr-id-ref"],
- "vdur": vdur["vdu-id-ref"]}
- })
- rollback.append({"topic": "pdus", "_id": pdu["_id"], "operation": "set", "content": rollback_pdu})
+ self.db.set_one(
+ "pdus",
+ {"_id": pdu["_id"]},
+ {
+ "_admin.usageState": "IN_USE",
+ "_admin.usage": {
+ "vnfr_id": vnfr["_id"],
+ "nsr_id": vnfr["nsr-id-ref"],
+ "vdur": vdur["vdu-id-ref"],
+ },
+ },
+ )
+ rollback.append(
+ {
+ "topic": "pdus",
+ "_id": pdu["_id"],
+ "operation": "set",
+ "content": rollback_pdu,
+ }
+ )
# step 3. Fill vnfr info by filling vdur
vdu_text = "vdur.{}".format(vdur_index)
if pdu_interface["name"] == vdur_interface["name"]:
iface_text = vdu_text + ".interfaces.{}".format(iface_index)
for k, v in pdu_interface.items():
- if k in ("ip-address", "mac-address"): # TODO: switch-xxxxx must be inserted
+ if k in (
+ "ip-address",
+ "mac-address",
+ ): # TODO: switch-xxxxx must be inserted
vnfr_update[iface_text + ".{}".format(k)] = v
- vnfr_update_rollback[iface_text + ".{}".format(k)] = vdur_interface.get(v)
+ vnfr_update_rollback[
+ iface_text + ".{}".format(k)
+ ] = vdur_interface.get(v)
if pdu_interface.get("ip-address"):
- if vdur_interface.get("mgmt-interface") or vdur_interface.get("mgmt-vnf"):
- vnfr_update_rollback[vdu_text + ".ip-address"] = vdur.get("ip-address")
- vnfr_update[vdu_text + ".ip-address"] = pdu_interface["ip-address"]
+ if vdur_interface.get(
+ "mgmt-interface"
+ ) or vdur_interface.get("mgmt-vnf"):
+ vnfr_update_rollback[
+ vdu_text + ".ip-address"
+ ] = vdur.get("ip-address")
+ vnfr_update[vdu_text + ".ip-address"] = pdu_interface[
+ "ip-address"
+ ]
if vdur_interface.get("mgmt-vnf"):
- vnfr_update_rollback["ip-address"] = vnfr.get("ip-address")
+ vnfr_update_rollback["ip-address"] = vnfr.get(
+ "ip-address"
+ )
vnfr_update["ip-address"] = pdu_interface["ip-address"]
- vnfr_update[vdu_text + ".ip-address"] = pdu_interface["ip-address"]
- if pdu_interface.get("vim-network-name") or pdu_interface.get("vim-network-id"):
- ifaces_forcing_vim_network.append({
- "name": vdur_interface.get("vnf-vld-id") or vdur_interface.get("ns-vld-id"),
- "vnf-vld-id": vdur_interface.get("vnf-vld-id"),
- "ns-vld-id": vdur_interface.get("ns-vld-id")})
+ vnfr_update[vdu_text + ".ip-address"] = pdu_interface[
+ "ip-address"
+ ]
+ if pdu_interface.get("vim-network-name") or pdu_interface.get(
+ "vim-network-id"
+ ):
+ ifaces_forcing_vim_network.append(
+ {
+ "name": vdur_interface.get("vnf-vld-id")
+ or vdur_interface.get("ns-vld-id"),
+ "vnf-vld-id": vdur_interface.get("vnf-vld-id"),
+ "ns-vld-id": vdur_interface.get("ns-vld-id"),
+ }
+ )
if pdu_interface.get("vim-network-id"):
- ifaces_forcing_vim_network[-1]["vim-network-id"] = pdu_interface["vim-network-id"]
+ ifaces_forcing_vim_network[-1][
+ "vim-network-id"
+ ] = pdu_interface["vim-network-id"]
if pdu_interface.get("vim-network-name"):
- ifaces_forcing_vim_network[-1]["vim-network-name"] = pdu_interface["vim-network-name"]
+ ifaces_forcing_vim_network[-1][
+ "vim-network-name"
+ ] = pdu_interface["vim-network-name"]
break
return ifaces_forcing_vim_network
- def _look_for_k8scluster(self, session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback):
+ def _look_for_k8scluster(
+ self, session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback
+ ):
"""
Look for an available k8scluster for all the kuds in the vnfd matching version and cni requirements.
Fills vnfr.kdur with the selected k8scluster
# restrict by cni
if vnfr["k8s-cluster"].get("cni"):
k8s_requirements["cni"] = vnfr["k8s-cluster"]["cni"]
- if not set(vnfr["k8s-cluster"]["cni"]).intersection(k8scluster.get("cni", ())):
+ if not set(vnfr["k8s-cluster"]["cni"]).intersection(
+ k8scluster.get("cni", ())
+ ):
continue
# restrict by version
if vnfr["k8s-cluster"].get("version"):
# restrict by number of networks
if vnfr["k8s-cluster"].get("nets"):
k8s_requirements["networks"] = len(vnfr["k8s-cluster"]["nets"])
- if not k8scluster.get("nets") or len(k8scluster["nets"]) < len(vnfr["k8s-cluster"]["nets"]):
+ if not k8scluster.get("nets") or len(k8scluster["nets"]) < len(
+ vnfr["k8s-cluster"]["nets"]
+ ):
continue
break
else:
- raise EngineException("No k8scluster with requirements='{}' at vim_account={} found for member_vnf_index={}"
- .format(k8s_requirements, vim_account, vnfr["member-vnf-index-ref"]))
+ raise EngineException(
+ "No k8scluster with requirements='{}' at vim_account={} found for member_vnf_index={}".format(
+ k8s_requirements, vim_account, vnfr["member-vnf-index-ref"]
+ )
+ )
for kdur_index, kdur in enumerate(get_iterable(vnfr.get("kdur"))):
# step 3. Fill vnfr info by filling kdur
else:
vim_net = k8scluster["nets"][k8scluster_net_list[0]]
k8scluster_net_list.pop(0)
- vnfr_update_rollback["k8s-cluster.nets.{}.vim_net".format(net_index)] = None
+ vnfr_update_rollback[
+ "k8s-cluster.nets.{}.vim_net".format(net_index)
+ ] = None
vnfr_update["k8s-cluster.nets.{}.vim_net".format(net_index)] = vim_net
- if vim_net and (kdur_net.get("vnf-vld-id") or kdur_net.get("ns-vld-id")):
- ifaces_forcing_vim_network.append({
- "name": kdur_net.get("vnf-vld-id") or kdur_net.get("ns-vld-id"),
- "vnf-vld-id": kdur_net.get("vnf-vld-id"),
- "ns-vld-id": kdur_net.get("ns-vld-id"),
- "vim-network-name": vim_net, # TODO can it be vim-network-id ???
- })
+ if vim_net and (
+ kdur_net.get("vnf-vld-id") or kdur_net.get("ns-vld-id")
+ ):
+ ifaces_forcing_vim_network.append(
+ {
+ "name": kdur_net.get("vnf-vld-id")
+ or kdur_net.get("ns-vld-id"),
+ "vnf-vld-id": kdur_net.get("vnf-vld-id"),
+ "ns-vld-id": kdur_net.get("ns-vld-id"),
+ "vim-network-name": vim_net, # TODO can it be vim-network-id ???
+ }
+ )
# TODO check that this forcing is not incompatible with other forcing
return ifaces_forcing_vim_network
+ def _update_vnfrs_from_nsd(self, nsr):
+ try:
+ nsr_id = nsr["_id"]
+ nsd = nsr["nsd"]
+
+ step = "Getting vnf_profiles from nsd"
+ vnf_profiles = nsd.get("df", [{}])[0].get("vnf-profile", ())
+ vld_fixed_ip_connection_point_data = {}
+
+ step = "Getting ip-address info from vnf_profile if it exists"
+ for vnfp in vnf_profiles:
+ # Checking ip-address info from nsd.vnf_profile and storing
+ for vlc in vnfp.get("virtual-link-connectivity", ()):
+ for cpd in vlc.get("constituent-cpd-id", ()):
+ if cpd.get("ip-address"):
+ step = "Storing ip-address info"
+ vld_fixed_ip_connection_point_data.update({vlc.get("virtual-link-profile-id") + '.' + cpd.get("constituent-base-element-id"): {
+ "vnfd-connection-point-ref": cpd.get(
+ "constituent-cpd-id"),
+ "ip-address": cpd.get(
+ "ip-address")}})
+
+ # Inserting ip address to vnfr
+ if len(vld_fixed_ip_connection_point_data) > 0:
+ step = "Getting vnfrs"
+ vnfrs = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
+ for item in vld_fixed_ip_connection_point_data.keys():
+ step = "Filtering vnfrs"
+ vnfr = next(filter(lambda vnfr: vnfr["member-vnf-index-ref"] == item.split('.')[1], vnfrs), None)
+ if vnfr:
+ vnfr_update = {}
+ for vdur_index, vdur in enumerate(vnfr["vdur"]):
+ for iface_index, iface in enumerate(vdur["interfaces"]):
+ step = "Looking for matched interface"
+ if (
+ iface.get("external-connection-point-ref")
+ == vld_fixed_ip_connection_point_data[item].get("vnfd-connection-point-ref") and
+ iface.get("ns-vld-id") == item.split('.')[0]
+
+ ):
+ vnfr_update_text = "vdur.{}.interfaces.{}".format(
+ vdur_index, iface_index
+ )
+ step = "Storing info in order to update vnfr"
+ vnfr_update[
+ vnfr_update_text + ".ip-address"
+ ] = increment_ip_mac(
+ vld_fixed_ip_connection_point_data[item].get("ip-address"),
+ vdur.get("count-index", 0), )
+ vnfr_update[vnfr_update_text + ".fixed-ip"] = True
+
+ step = "updating vnfr at database"
+ self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, vnfr_update)
+ except (
+ ValidationError,
+ EngineException,
+ DbException,
+ MsgException,
+ FsException,
+ ) as e:
+ raise type(e)("{} while '{}'".format(e, step), http_code=e.http_code)
+
def _update_vnfrs(self, session, rollback, nsr, indata):
# get vnfr
nsr_id = nsr["_id"]
# update vim-account-id
vim_account = indata["vimAccountId"]
+ vca_id = self._get_vim_account(vim_account, session).get("vca")
# check instantiate parameters
for vnf_inst_params in get_iterable(indata.get("vnf")):
if vnf_inst_params["member-vnf-index"] != member_vnf_index:
continue
if vnf_inst_params.get("vimAccountId"):
vim_account = vnf_inst_params.get("vimAccountId")
+ vca_id = self._get_vim_account(vim_account, session).get("vca")
# get vnf.vdu.interface instantiation params to update vnfr.vdur.interfaces ip, mac
for vdu_inst_param in get_iterable(vnf_inst_params.get("vdu")):
for vdur_index, vdur in enumerate(vnfr["vdur"]):
if vdu_inst_param["id"] != vdur["vdu-id-ref"]:
continue
- for iface_inst_param in get_iterable(vdu_inst_param.get("interface")):
- iface_index, _ = next(i for i in enumerate(vdur["interfaces"])
- if i[1]["name"] == iface_inst_param["name"])
- vnfr_update_text = "vdur.{}.interfaces.{}".format(vdur_index, iface_index)
+ for iface_inst_param in get_iterable(
+ vdu_inst_param.get("interface")
+ ):
+ iface_index, _ = next(
+ i
+ for i in enumerate(vdur["interfaces"])
+ if i[1]["name"] == iface_inst_param["name"]
+ )
+ vnfr_update_text = "vdur.{}.interfaces.{}".format(
+ vdur_index, iface_index
+ )
if iface_inst_param.get("ip-address"):
- vnfr_update[vnfr_update_text + ".ip-address"] = increment_ip_mac(
- iface_inst_param.get("ip-address"), vdur.get("count-index", 0))
+ vnfr_update[
+ vnfr_update_text + ".ip-address"
+ ] = increment_ip_mac(
+ iface_inst_param.get("ip-address"),
+ vdur.get("count-index", 0),
+ )
vnfr_update[vnfr_update_text + ".fixed-ip"] = True
if iface_inst_param.get("mac-address"):
- vnfr_update[vnfr_update_text + ".mac-address"] = increment_ip_mac(
- iface_inst_param.get("mac-address"), vdur.get("count-index", 0))
+ vnfr_update[
+ vnfr_update_text + ".mac-address"
+ ] = increment_ip_mac(
+ iface_inst_param.get("mac-address"),
+ vdur.get("count-index", 0),
+ )
vnfr_update[vnfr_update_text + ".fixed-mac"] = True
if iface_inst_param.get("floating-ip-required"):
- vnfr_update[vnfr_update_text + ".floating-ip-required"] = True
+ vnfr_update[
+ vnfr_update_text + ".floating-ip-required"
+ ] = True
# get vnf.internal-vld.internal-conection-point instantiation params to update vnfr.vdur.interfaces
# TODO update vld with the ip-profile
- for ivld_inst_param in get_iterable(vnf_inst_params.get("internal-vld")):
- for icp_inst_param in get_iterable(ivld_inst_param.get("internal-connection-point")):
+ for ivld_inst_param in get_iterable(
+ vnf_inst_params.get("internal-vld")
+ ):
+ for icp_inst_param in get_iterable(
+ ivld_inst_param.get("internal-connection-point")
+ ):
# look for iface
for vdur_index, vdur in enumerate(vnfr["vdur"]):
for iface_index, iface in enumerate(vdur["interfaces"]):
- if iface.get("internal-connection-point-ref") == icp_inst_param["id-ref"]:
- vnfr_update_text = "vdur.{}.interfaces.{}".format(vdur_index, iface_index)
+ if (
+ iface.get("internal-connection-point-ref")
+ == icp_inst_param["id-ref"]
+ ):
+ vnfr_update_text = "vdur.{}.interfaces.{}".format(
+ vdur_index, iface_index
+ )
if icp_inst_param.get("ip-address"):
- vnfr_update[vnfr_update_text + ".ip-address"] = increment_ip_mac(
- icp_inst_param.get("ip-address"), vdur.get("count-index", 0))
- vnfr_update[vnfr_update_text + ".fixed-ip"] = True
+ vnfr_update[
+ vnfr_update_text + ".ip-address"
+ ] = increment_ip_mac(
+ icp_inst_param.get("ip-address"),
+ vdur.get("count-index", 0),
+ )
+ vnfr_update[
+ vnfr_update_text + ".fixed-ip"
+ ] = True
if icp_inst_param.get("mac-address"):
- vnfr_update[vnfr_update_text + ".mac-address"] = increment_ip_mac(
- icp_inst_param.get("mac-address"), vdur.get("count-index", 0))
- vnfr_update[vnfr_update_text + ".fixed-mac"] = True
+ vnfr_update[
+ vnfr_update_text + ".mac-address"
+ ] = increment_ip_mac(
+ icp_inst_param.get("mac-address"),
+ vdur.get("count-index", 0),
+ )
+ vnfr_update[
+ vnfr_update_text + ".fixed-mac"
+ ] = True
break
# get ip address from instantiation parameters.vld.vnfd-connection-point-ref
for vld_inst_param in get_iterable(indata.get("vld")):
- for vnfcp_inst_param in get_iterable(vld_inst_param.get("vnfd-connection-point-ref")):
+ for vnfcp_inst_param in get_iterable(
+ vld_inst_param.get("vnfd-connection-point-ref")
+ ):
if vnfcp_inst_param["member-vnf-index-ref"] != member_vnf_index:
continue
# look for iface
for vdur_index, vdur in enumerate(vnfr["vdur"]):
for iface_index, iface in enumerate(vdur["interfaces"]):
- if iface.get("external-connection-point-ref") == \
- vnfcp_inst_param["vnfd-connection-point-ref"]:
- vnfr_update_text = "vdur.{}.interfaces.{}".format(vdur_index, iface_index)
+ if (
+ iface.get("external-connection-point-ref")
+ == vnfcp_inst_param["vnfd-connection-point-ref"]
+ ):
+ vnfr_update_text = "vdur.{}.interfaces.{}".format(
+ vdur_index, iface_index
+ )
if vnfcp_inst_param.get("ip-address"):
- vnfr_update[vnfr_update_text + ".ip-address"] = increment_ip_mac(
- vnfcp_inst_param.get("ip-address"), vdur.get("count-index", 0))
+ vnfr_update[
+ vnfr_update_text + ".ip-address"
+ ] = increment_ip_mac(
+ vnfcp_inst_param.get("ip-address"),
+ vdur.get("count-index", 0),
+ )
vnfr_update[vnfr_update_text + ".fixed-ip"] = True
if vnfcp_inst_param.get("mac-address"):
- vnfr_update[vnfr_update_text + ".mac-address"] = increment_ip_mac(
- vnfcp_inst_param.get("mac-address"), vdur.get("count-index", 0))
+ vnfr_update[
+ vnfr_update_text + ".mac-address"
+ ] = increment_ip_mac(
+ vnfcp_inst_param.get("mac-address"),
+ vdur.get("count-index", 0),
+ )
vnfr_update[vnfr_update_text + ".fixed-mac"] = True
break
vnfr_update["vim-account-id"] = vim_account
vnfr_update_rollback["vim-account-id"] = vnfr.get("vim-account-id")
+ if vca_id:
+ vnfr_update["vca-id"] = vca_id
+ vnfr_update_rollback["vca-id"] = vnfr.get("vca-id")
+
# get pdu
- ifaces_forcing_vim_network = self._look_for_pdu(session, rollback, vnfr, vim_account, vnfr_update,
- vnfr_update_rollback)
+ ifaces_forcing_vim_network = self._look_for_pdu(
+ session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback
+ )
# get kdus
- ifaces_forcing_vim_network += self._look_for_k8scluster(session, rollback, vnfr, vim_account, vnfr_update,
- vnfr_update_rollback)
+ ifaces_forcing_vim_network += self._look_for_k8scluster(
+ session, rollback, vnfr, vim_account, vnfr_update, vnfr_update_rollback
+ )
# update database vnfr
self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, vnfr_update)
- rollback.append({"topic": "vnfrs", "_id": vnfr["_id"], "operation": "set", "content": vnfr_update_rollback})
+ rollback.append(
+ {
+ "topic": "vnfrs",
+ "_id": vnfr["_id"],
+ "operation": "set",
+ "content": vnfr_update_rollback,
+ }
+ )
# Update indada in case pdu forces to use a concrete vim-network-name
# TODO check if user has already insert a vim-network-name and raises an error
if iface_info.get("ns-vld-id"):
if "vld" not in indata:
indata["vld"] = []
- indata["vld"].append({key: iface_info[key] for key in
- ("name", "vim-network-name", "vim-network-id") if iface_info.get(key)})
+ indata["vld"].append(
+ {
+ key: iface_info[key]
+ for key in ("name", "vim-network-name", "vim-network-id")
+ if iface_info.get(key)
+ }
+ )
elif iface_info.get("vnf-vld-id"):
if "vnf" not in indata:
indata["vnf"] = []
- indata["vnf"].append({
- "member-vnf-index": member_vnf_index,
- "internal-vld": [{key: iface_info[key] for key in
- ("name", "vim-network-name", "vim-network-id") if iface_info.get(key)}]
- })
+ indata["vnf"].append(
+ {
+ "member-vnf-index": member_vnf_index,
+ "internal-vld": [
+ {
+ key: iface_info[key]
+ for key in (
+ "name",
+ "vim-network-name",
+ "vim-network-id",
+ )
+ if iface_info.get(key)
+ }
+ ],
+ }
+ )
@staticmethod
def _create_nslcmop(nsr_id, operation, params):
"""
Creates a ns-lcm-opp content to be stored at database.
:param nsr_id: internal id of the instance
- :param operation: instantiate, terminate, scale, action, ...
+        :param operation: instantiate, terminate, scale, action, update, ...
:param params: user parameters for the operation
:return: dictionary following SOL005 format
"""
"links": {
"self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
"nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
- }
+ },
}
return nslcmop
vims = self.db.get_list("vim_accounts", db_filter)
vimAccounts = []
for vim in vims:
- vimAccounts.append(vim['_id'])
+ vimAccounts.append(vim["_id"])
return vimAccounts
- def new(self, rollback, session, indata=None, kwargs=None, headers=None, slice_object=False):
+ def new(
+ self,
+ rollback,
+ session,
+ indata=None,
+ kwargs=None,
+ headers=None,
+ slice_object=False,
+ ):
"""
Performs a new operation over a ns
:param rollback: list to append created items at database in case a rollback must to be done
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param indata: descriptor with the parameters of the operation. It must contains among others
nsInstanceId: _id of the nsr to perform the operation
- operation: it can be: instantiate, terminate, action, TODO: update, heal
+            operation: it can be: instantiate, terminate, action, update. TODO: heal
:param kwargs: used to override the indata descriptor
:param headers: http request headers
:return: id of the nslcmops
"""
+
def check_if_nsr_is_not_slice_member(session, nsr_id):
nsis = None
db_filter = self._get_project_filter(session)
db_filter["_admin.nsrs-detailed-list.ANYINDEX.nsrId"] = nsr_id
- nsis = self.db.get_one("nsis", db_filter, fail_on_empty=False, fail_on_more=False)
+ nsis = self.db.get_one(
+ "nsis", db_filter, fail_on_empty=False, fail_on_more=False
+ )
if nsis:
- raise EngineException("The NS instance {} cannot be terminated because is used by the slice {}".format(
- nsr_id, nsis["_id"]), http_code=HTTPStatus.CONFLICT)
+ raise EngineException(
+ "The NS instance {} cannot be terminated because is used by the slice {}".format(
+ nsr_id, nsis["_id"]
+ ),
+ http_code=HTTPStatus.CONFLICT,
+ )
try:
# Override descriptor with query string kwargs
# initial checking
if operation == "terminate" and slice_object is False:
check_if_nsr_is_not_slice_member(session, nsr["_id"])
- if not nsr["_admin"].get("nsState") or nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
+ if (
+ not nsr["_admin"].get("nsState")
+ or nsr["_admin"]["nsState"] == "NOT_INSTANTIATED"
+ ):
if operation == "terminate" and indata.get("autoremove"):
# NSR must be deleted
- return None, None # a none in this case is used to indicate not instantiated. It can be removed
+ return (
+ None,
+ None,
+ ) # a none in this case is used to indicate not instantiated. It can be removed
if operation != "instantiate":
- raise EngineException("ns_instance '{}' cannot be '{}' because it is not instantiated".format(
- nsInstanceId, operation), HTTPStatus.CONFLICT)
+ raise EngineException(
+ "ns_instance '{}' cannot be '{}' because it is not instantiated".format(
+ nsInstanceId, operation
+ ),
+ HTTPStatus.CONFLICT,
+ )
else:
if operation == "instantiate" and not session["force"]:
- raise EngineException("ns_instance '{}' cannot be '{}' because it is already instantiated".format(
- nsInstanceId, operation), HTTPStatus.CONFLICT)
+ raise EngineException(
+ "ns_instance '{}' cannot be '{}' because it is already instantiated".format(
+ nsInstanceId, operation
+ ),
+ HTTPStatus.CONFLICT,
+ )
self._check_ns_operation(session, nsr, operation, indata)
+ if (indata.get("primitive_params")):
+ indata["primitive_params"] = json.dumps(indata["primitive_params"])
+ elif (indata.get("additionalParamsForVnf")):
+ indata["additionalParamsForVnf"] = json.dumps(indata["additionalParamsForVnf"])
if operation == "instantiate":
+ self._update_vnfrs_from_nsd(nsr)
self._update_vnfrs(session, rollback, nsr, indata)
-
+ if (operation == "update") and (indata["updateType"] == "CHANGE_VNFPKG"):
+ nsr_update = {}
+ vnfd_id = indata["changeVnfPackageData"]["vnfdId"]
+ vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+ nsd = self.db.get_one("nsds", {"_id": nsr["nsd-id"]})
+ ns_request = nsr["instantiate_params"]
+ vnfr = self.db.get_one("vnfrs", {"_id": indata["changeVnfPackageData"]["vnfInstanceId"]})
+ latest_vnfd_revision = vnfd["_admin"].get("revision", 1)
+ vnfr_vnfd_revision = vnfr.get("revision", 1)
+ if latest_vnfd_revision != vnfr_vnfd_revision:
+ old_vnfd_id = vnfd_id + ":" + str(vnfr_vnfd_revision)
+ old_db_vnfd = self.db.get_one("vnfds_revisions", {"_id": old_vnfd_id})
+ old_sw_version = old_db_vnfd.get("software-version", "1.0")
+ new_sw_version = vnfd.get("software-version", "1.0")
+ if new_sw_version != old_sw_version:
+ vnf_index = vnfr["member-vnf-index-ref"]
+ self.logger.info("nsr {}".format(nsr))
+ for vdu in vnfd["vdu"]:
+ self.nsrtopic._add_flavor_to_nsr(vdu, vnfd, nsr)
+ sw_image_id = vdu.get("sw-image-desc")
+ if sw_image_id:
+ image_data = self.nsrtopic._get_image_data_from_vnfd(vnfd, sw_image_id)
+ self.nsrtopic._add_image_to_nsr(nsr, image_data)
+ for alt_image in vdu.get("alternative-sw-image-desc", ()):
+ image_data = self.nsrtopic._get_image_data_from_vnfd(vnfd, alt_image)
+ self.nsrtopic._add_image_to_nsr(nsr, image_data)
+ nsr_update["image"] = nsr["image"]
+ nsr_update["flavor"] = nsr["flavor"]
+ self.db.set_one("nsrs", {"_id": nsr["_id"]}, nsr_update)
+ ns_k8s_namespace = self.nsrtopic._get_ns_k8s_namespace(nsd, ns_request, session)
+ vnfr_descriptor = self.nsrtopic._create_vnfr_descriptor_from_vnfd(
+ nsd,
+ vnfd,
+ vnfd_id,
+ vnf_index,
+ nsr,
+ ns_request,
+ ns_k8s_namespace,
+ )
+ indata["newVdur"] = vnfr_descriptor["vdur"]
nslcmop_desc = self._create_nslcmop(nsInstanceId, operation, indata)
_id = nslcmop_desc["_id"]
- self.format_on_new(nslcmop_desc, session["project_id"], make_public=session["public"])
+ self.format_on_new(
+ nslcmop_desc, session["project_id"], make_public=session["public"]
+ )
if indata.get("placement-engine"):
# Save valid vim accounts in lcm operation descriptor
- nslcmop_desc['operationParams']['validVimAccounts'] = self._get_enabled_vims(session)
+ nslcmop_desc["operationParams"][
+ "validVimAccounts"
+ ] = self._get_enabled_vims(session)
self.db.create("nslcmops", nslcmop_desc)
rollback.append({"topic": "nslcmops", "_id": _id})
if not slice_object:
# raise EngineException("Cannot get ns_instance '{}': {}".format(e), HTTPStatus.NOT_FOUND)
def delete(self, session, _id, dry_run=False, not_send_msg=None):
    """Direct deletion of an ns-lcm-op is not supported.

    Lifecycle-operation records are removed internally (e.g. together with
    their NS instance), never through this topic API; therefore this method
    always raises.
    :raises EngineException: always, with HTTP 500 INTERNAL_SERVER_ERROR.
    """
    detail = "Method delete called directly"
    raise EngineException(detail, HTTPStatus.INTERNAL_SERVER_ERROR)
def edit(self, session, _id, indata=None, kwargs=None, content=None):
    """Direct edition of an ns-lcm-op is not supported.

    Operation records are immutable from the topic API; this method always
    raises.
    :raises EngineException: always, with HTTP 500 INTERNAL_SERVER_ERROR.
    """
    detail = "Method edit called directly"
    raise EngineException(detail, HTTPStatus.INTERNAL_SERVER_ERROR)
class NsiTopic(BaseTopic):
if additional_params:
for k, v in additional_params.items():
if not isinstance(k, str):
- raise EngineException("Invalid param at additionalParamsForNsi:{}. Only string keys are allowed".
- format(k))
+ raise EngineException(
+ "Invalid param at additionalParamsForNsi:{}. Only string keys are allowed".format(
+ k
+ )
+ )
if "." in k or "$" in k:
- raise EngineException("Invalid param at additionalParamsForNsi:{}. Keys must not contain dots or $".
- format(k))
+ raise EngineException(
+ "Invalid param at additionalParamsForNsi:{}. Keys must not contain dots or $".format(
+ k
+ )
+ )
if isinstance(v, (dict, tuple, list)):
additional_params[k] = "!!yaml " + safe_dump(v)
return additional_params
return
nstd_id = descriptor["nst-ref"]
if not self.get_item_list(session, "nsts", {"id": nstd_id}):
- raise EngineException("Descriptor error at nst-ref='{}' references a non exist nstd".format(nstd_id),
- http_code=HTTPStatus.CONFLICT)
+ raise EngineException(
+ "Descriptor error at nst-ref='{}' references a non exist nstd".format(
+ nstd_id
+ ),
+ http_code=HTTPStatus.CONFLICT,
+ )
def check_conflict_on_del(self, session, _id, db_content):
"""
return
nsi = db_content
if nsi["_admin"].get("nsiState") == "INSTANTIATED":
- raise EngineException("nsi '{}' cannot be deleted because it is in 'INSTANTIATED' state. "
- "Launch 'terminate' operation first; or force deletion".format(_id),
- http_code=HTTPStatus.CONFLICT)
+ raise EngineException(
+ "nsi '{}' cannot be deleted because it is in 'INSTANTIATED' state. "
+ "Launch 'terminate' operation first; or force deletion".format(_id),
+ http_code=HTTPStatus.CONFLICT,
+ )
def delete_extra(self, session, _id, db_content, not_send_msg=None):
"""
for nsrs_detailed_item in nsir["_admin"]["nsrs-detailed-list"]:
nsr_id = nsrs_detailed_item["nsrId"]
if nsrs_detailed_item.get("shared"):
- _filter = {"_admin.nsrs-detailed-list.ANYINDEX.shared": True,
- "_admin.nsrs-detailed-list.ANYINDEX.nsrId": nsr_id,
- "_id.ne": nsir["_id"]}
- nsi = self.db.get_one("nsis", _filter, fail_on_empty=False, fail_on_more=False)
+ _filter = {
+ "_admin.nsrs-detailed-list.ANYINDEX.shared": True,
+ "_admin.nsrs-detailed-list.ANYINDEX.nsrId": nsr_id,
+ "_id.ne": nsir["_id"],
+ }
+ nsi = self.db.get_one(
+ "nsis", _filter, fail_on_empty=False, fail_on_more=False
+ )
if nsi: # last one using nsr
continue
try:
- self.nsrTopic.delete(session, nsr_id, dry_run=False, not_send_msg=not_send_msg)
+ self.nsrTopic.delete(
+ session, nsr_id, dry_run=False, not_send_msg=not_send_msg
+ )
except (DbException, EngineException) as e:
if e.http_code == HTTPStatus.NOT_FOUND:
pass
nsir_admin = nsir.get("_admin")
if nsir_admin and nsir_admin.get("nst-id"):
# check if used by another NSI
- nsis_list = self.db.get_one("nsis", {"nst-id": nsir_admin["nst-id"]},
- fail_on_empty=False, fail_on_more=False)
+ nsis_list = self.db.get_one(
+ "nsis",
+ {"nst-id": nsir_admin["nst-id"]},
+ fail_on_empty=False,
+ fail_on_more=False,
+ )
if not nsis_list:
- self.db.set_one("nsts", {"_id": nsir_admin["nst-id"]}, {"_admin.usageState": "NOT_IN_USE"})
+ self.db.set_one(
+ "nsts",
+ {"_id": nsir_admin["nst-id"]},
+ {"_admin.usageState": "NOT_IN_USE"},
+ )
def new(self, rollback, session, indata=None, kwargs=None, headers=None):
"""
slice_request = self._validate_input_new(slice_request, session["force"])
# look for nstd
- step = "getting nstd id='{}' from database".format(slice_request.get("nstId"))
+ step = "getting nstd id='{}' from database".format(
+ slice_request.get("nstId")
+ )
_filter = self._get_project_filter(session)
_filter["_id"] = slice_request["nstId"]
nstd = self.db.get_one("nsts", _filter)
# check NST is not disabled
step = "checking NST operationalState"
if nstd["_admin"]["operationalState"] == "DISABLED":
- raise EngineException("nst with id '{}' is DISABLED, and thus cannot be used to create a netslice "
- "instance".format(slice_request["nstId"]), http_code=HTTPStatus.CONFLICT)
+ raise EngineException(
+ "nst with id '{}' is DISABLED, and thus cannot be used to create a netslice "
+ "instance".format(slice_request["nstId"]),
+ http_code=HTTPStatus.CONFLICT,
+ )
del _filter["_id"]
# check NSD is not disabled
step = "checking operationalState"
if nstd["_admin"]["operationalState"] == "DISABLED":
- raise EngineException("nst with id '{}' is DISABLED, and thus cannot be used to create "
- "a network slice".format(slice_request["nstId"]), http_code=HTTPStatus.CONFLICT)
+ raise EngineException(
+ "nst with id '{}' is DISABLED, and thus cannot be used to create "
+ "a network slice".format(slice_request["nstId"]),
+ http_code=HTTPStatus.CONFLICT,
+ )
nstd.pop("_admin", None)
nstd_id = nstd.pop("_id", None)
"nsr-ref-list": [],
"vlr-list": [],
"_id": nsi_id,
- "additionalParamsForNsi": self._format_addional_params(slice_request)
+ "additionalParamsForNsi": self._format_addional_params(slice_request),
}
step = "creating nsi at database"
- self.format_on_new(nsi_descriptor, session["project_id"], make_public=session["public"])
+ self.format_on_new(
+ nsi_descriptor, session["project_id"], make_public=session["public"]
+ )
nsi_descriptor["_admin"]["nsiState"] = "NOT_INSTANTIATED"
nsi_descriptor["_admin"]["netslice-subnet"] = None
nsi_descriptor["_admin"]["deployed"] = {}
for member_ns in nstd["netslice-subnet"]:
nsd_id = member_ns["nsd-ref"]
step = "getting nstd id='{}' constituent-nsd='{}' from database".format(
- member_ns["nsd-ref"], member_ns["id"])
+ member_ns["nsd-ref"], member_ns["id"]
+ )
if nsd_id not in needed_nsds:
# Obtain nsd
_filter["id"] = nsd_id
- nsd = self.db.get_one("nsds", _filter, fail_on_empty=True, fail_on_more=True)
+ nsd = self.db.get_one(
+ "nsds", _filter, fail_on_empty=True, fail_on_more=True
+ )
del _filter["id"]
nsd.pop("_admin")
needed_nsds[nsd_id] = nsd
services.append(member_ns)
step = "filling nsir nsd-id='{}' constituent-nsd='{}' from database".format(
- member_ns["nsd-ref"], member_ns["id"])
+ member_ns["nsd-ref"], member_ns["id"]
+ )
# creates Network Services records (NSRs)
step = "creating nsrs at database using NsrTopic.new()"
indata_ns = {}
# Is the nss shared and instantiated?
_filter["_admin.nsrs-detailed-list.ANYINDEX.shared"] = True
- _filter["_admin.nsrs-detailed-list.ANYINDEX.nsd-id"] = service["nsd-ref"]
+ _filter["_admin.nsrs-detailed-list.ANYINDEX.nsd-id"] = service[
+ "nsd-ref"
+ ]
_filter["_admin.nsrs-detailed-list.ANYINDEX.nss-id"] = service["id"]
- nsi = self.db.get_one("nsis", _filter, fail_on_empty=False, fail_on_more=False)
+ nsi = self.db.get_one(
+ "nsis", _filter, fail_on_empty=False, fail_on_more=False
+ )
if nsi and service.get("is-shared-nss"):
nsrs_detailed_list = nsi["_admin"]["nsrs-detailed-list"]
for nsrs_detailed_item in nsrs_detailed_list:
if service.get("instantiation-parameters"):
indata_ns = deepcopy(service["instantiation-parameters"])
# del service["instantiation-parameters"]
-
+
indata_ns["nsdId"] = service["_id"]
- indata_ns["nsName"] = slice_request.get("nsiName") + "." + service["id"]
+ indata_ns["nsName"] = (
+ slice_request.get("nsiName") + "." + service["id"]
+ )
indata_ns["vimAccountId"] = slice_request.get("vimAccountId")
indata_ns["nsDescription"] = service["description"]
if slice_request.get("ssh_keys"):
copy_ns_param = deepcopy(ns_param)
del copy_ns_param["id"]
indata_ns.update(copy_ns_param)
- break
+ break
# Creates Nsr objects
- _id_nsr, _ = self.nsrTopic.new(rollback, session, indata_ns, kwargs, headers)
- nsrs_item = {"nsrId": _id_nsr, "shared": service.get("is-shared-nss"), "nsd-id": service["nsd-ref"],
- "nss-id": service["id"], "nslcmop_instantiate": None}
+ _id_nsr, _ = self.nsrTopic.new(
+ rollback, session, indata_ns, kwargs, headers
+ )
+ nsrs_item = {
+ "nsrId": _id_nsr,
+ "shared": service.get("is-shared-nss"),
+ "nsd-id": service["nsd-ref"],
+ "nss-id": service["id"],
+ "nslcmop_instantiate": None,
+ }
indata_ns["nss-id"] = service["id"]
nsrs_list.append(nsrs_item)
nsi_netslice_subnet.append(indata_ns)
# Adding the nsrs list to the nsi
nsi_descriptor["_admin"]["nsrs-detailed-list"] = nsrs_list
nsi_descriptor["_admin"]["netslice-subnet"] = nsi_netslice_subnet
- self.db.set_one("nsts", {"_id": slice_request["nstId"]}, {"_admin.usageState": "IN_USE"})
+ self.db.set_one(
+ "nsts", {"_id": slice_request["nstId"]}, {"_admin.usageState": "IN_USE"}
+ )
# Creating the entry in the database
self.db.create("nsis", nsi_descriptor)
rollback.append({"topic": "nsis", "_id": nsi_id})
return nsi_id, None
- except Exception as e: # TODO remove try Except, it is captured at nbi.py
- self.logger.exception("Exception {} at NsiTopic.new()".format(e), exc_info=True)
+ except Exception as e: # TODO remove try Except, it is captured at nbi.py
+ self.logger.exception(
+ "Exception {} at NsiTopic.new()".format(e), exc_info=True
+ )
raise EngineException("Error {}: {}".format(step, e))
except ValidationError as e:
raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)
def edit(self, session, _id, indata=None, kwargs=None, content=None):
- raise EngineException("Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR)
+ raise EngineException(
+ "Method edit called directly", HTTPStatus.INTERNAL_SERVER_ERROR
+ )
class NsiLcmOpTopic(BaseTopic):
topic_msg = "nsi"
operation_schema = { # mapping between operation and jsonschema to validate
"instantiate": nsi_instantiate,
- "terminate": None
+ "terminate": None,
}
-
+
def __init__(self, db, fs, msg, auth):
BaseTopic.__init__(self, db, fs, msg, auth)
self.nsi_NsLcmOpTopic = NsLcmOpTopic(self.db, self.fs, self.msg, self.auth)
nsds[nsd_id] = self.db.get_one("nsds", _filter)
return nsds[nsd_id]
else:
- raise EngineException("Invalid parameter nstId='{}' is not one of the "
- "nst:netslice-subnet".format(nstId))
+ raise EngineException(
+ "Invalid parameter nstId='{}' is not one of the "
+ "nst:netslice-subnet".format(nstId)
+ )
+
if operation == "instantiate":
# check the existance of netslice-subnet items
- for in_nst in get_iterable(indata.get("netslice-subnet")):
+ for in_nst in get_iterable(indata.get("netslice-subnet")):
check_valid_netslice_subnet_id(in_nst["id"])
def _create_nsilcmop(self, session, netsliceInstanceId, operation, params):
"isCancelPending": False,
"links": {
"self": "/osm/nsilcm/v1/nsi_lcm_op_occs/" + _id,
- "netsliceInstanceId": "/osm/nsilcm/v1/netslice_instances/" + netsliceInstanceId,
- }
+ "netsliceInstanceId": "/osm/nsilcm/v1/netslice_instances/"
+ + netsliceInstanceId,
+ },
}
return nsilcmop
for admin_subnet_item in nsir["_admin"].get("netslice-subnet"):
if admin_subnet_item["nss-id"] == nst_sb_item["id"]:
for admin_vld_item in nsir["_admin"].get("netslice-vld"):
- for admin_vld_nss_cp_ref_item in admin_vld_item["nss-connection-point-ref"]:
- if admin_subnet_item["nss-id"] == admin_vld_nss_cp_ref_item["nss-ref"]:
- if not nsr_item["nsrId"] in admin_vld_item["shared-nsrs-list"]:
- admin_vld_item["shared-nsrs-list"].append(nsr_item["nsrId"])
+ for admin_vld_nss_cp_ref_item in admin_vld_item[
+ "nss-connection-point-ref"
+ ]:
+ if (
+ admin_subnet_item["nss-id"]
+ == admin_vld_nss_cp_ref_item["nss-ref"]
+ ):
+ if (
+ not nsr_item["nsrId"]
+ in admin_vld_item["shared-nsrs-list"]
+ ):
+ admin_vld_item["shared-nsrs-list"].append(
+ nsr_item["nsrId"]
+ )
break
# self.db.set_one("nsis", {"_id": nsir["_id"]}, nsir)
- self.db.set_one("nsis", {"_id": nsir["_id"]}, {"_admin.netslice-vld": nsir["_admin"].get("netslice-vld")})
+ self.db.set_one(
+ "nsis",
+ {"_id": nsir["_id"]},
+ {"_admin.netslice-vld": nsir["_admin"].get("netslice-vld")},
+ )
def new(self, rollback, session, indata=None, kwargs=None, headers=None):
"""
del _filter["_id"]
# initial checking
- if not nsir["_admin"].get("nsiState") or nsir["_admin"]["nsiState"] == "NOT_INSTANTIATED":
+ if (
+ not nsir["_admin"].get("nsiState")
+ or nsir["_admin"]["nsiState"] == "NOT_INSTANTIATED"
+ ):
if operation == "terminate" and indata.get("autoremove"):
# NSIR must be deleted
- return None, None # a none in this case is used to indicate not instantiated. It can be removed
+ return (
+ None,
+ None,
+ ) # a none in this case is used to indicate not instantiated. It can be removed
if operation != "instantiate":
- raise EngineException("netslice_instance '{}' cannot be '{}' because it is not instantiated".format(
- netsliceInstanceId, operation), HTTPStatus.CONFLICT)
+ raise EngineException(
+ "netslice_instance '{}' cannot be '{}' because it is not instantiated".format(
+ netsliceInstanceId, operation
+ ),
+ HTTPStatus.CONFLICT,
+ )
else:
if operation == "instantiate" and not session["force"]:
- raise EngineException("netslice_instance '{}' cannot be '{}' because it is already instantiated".
- format(netsliceInstanceId, operation), HTTPStatus.CONFLICT)
-
+ raise EngineException(
+ "netslice_instance '{}' cannot be '{}' because it is already instantiated".format(
+ netsliceInstanceId, operation
+ ),
+ HTTPStatus.CONFLICT,
+ )
+
# Creating all the NS_operation (nslcmop)
# Get service list from db
nsrs_list = nsir["_admin"]["nsrs-detailed-list"]
if nsr_item.get("shared"):
_filter["_admin.nsrs-detailed-list.ANYINDEX.shared"] = True
_filter["_admin.nsrs-detailed-list.ANYINDEX.nsrId"] = nsr_id
- _filter["_admin.nsrs-detailed-list.ANYINDEX.nslcmop_instantiate.ne"] = None
+ _filter[
+ "_admin.nsrs-detailed-list.ANYINDEX.nslcmop_instantiate.ne"
+ ] = None
_filter["_id.ne"] = netsliceInstanceId
- nsi = self.db.get_one("nsis", _filter, fail_on_empty=False, fail_on_more=False)
+ nsi = self.db.get_one(
+ "nsis", _filter, fail_on_empty=False, fail_on_more=False
+ )
if operation == "terminate":
- _update = {"_admin.nsrs-detailed-list.{}.nslcmop_instantiate".format(index): None}
+ _update = {
+ "_admin.nsrs-detailed-list.{}.nslcmop_instantiate".format(
+ index
+ ): None
+ }
self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
- if nsi: # other nsi is using this nsr and it needs this nsr instantiated
+ if (
+ nsi
+ ): # other nsi is using this nsr and it needs this nsr instantiated
continue # do not create nsilcmop
else: # instantiate
# looks the first nsi fulfilling the conditions but not being the current NSIR
if nsi:
- nsi_nsr_item = next(n for n in nsi["_admin"]["nsrs-detailed-list"] if
- n["nsrId"] == nsr_id and n["shared"] and
- n["nslcmop_instantiate"])
+ nsi_nsr_item = next(
+ n
+ for n in nsi["_admin"]["nsrs-detailed-list"]
+ if n["nsrId"] == nsr_id
+ and n["shared"]
+ and n["nslcmop_instantiate"]
+ )
self.add_shared_nsr_2vld(nsir, nsr_item)
nslcmops.append(nsi_nsr_item["nslcmop_instantiate"])
- _update = {"_admin.nsrs-detailed-list.{}".format(index): nsi_nsr_item}
+ _update = {
+ "_admin.nsrs-detailed-list.{}".format(
+ index
+ ): nsi_nsr_item
+ }
self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
# continue to not create nslcmop since nsrs is shared and nsrs was created
continue
# Creating NS_LCM_OP with the flag slice_object=True to not trigger the service instantiation
# message via kafka bus
- nslcmop, _ = self.nsi_NsLcmOpTopic.new(rollback, session, indata_ns, None, headers,
- slice_object=True)
+ nslcmop, _ = self.nsi_NsLcmOpTopic.new(
+ rollback, session, indata_ns, None, headers, slice_object=True
+ )
nslcmops.append(nslcmop)
if operation == "instantiate":
- _update = {"_admin.nsrs-detailed-list.{}.nslcmop_instantiate".format(index): nslcmop}
+ _update = {
+ "_admin.nsrs-detailed-list.{}.nslcmop_instantiate".format(
+ index
+ ): nslcmop
+ }
self.db.set_one("nsis", {"_id": nsir["_id"]}, _update)
except (DbException, EngineException) as e:
if e.http_code == HTTPStatus.NOT_FOUND:
- self.logger.info(logging_prefix + "skipping NS={} because not found".format(nsr_id))
+ self.logger.info(
+ logging_prefix
+ + "skipping NS={} because not found".format(nsr_id)
+ )
pass
else:
raise
indata["nslcmops_ids"] = nslcmops
self._check_nsi_operation(session, nsir, operation, indata)
- nsilcmop_desc = self._create_nsilcmop(session, netsliceInstanceId, operation, indata)
- self.format_on_new(nsilcmop_desc, session["project_id"], make_public=session["public"])
+ nsilcmop_desc = self._create_nsilcmop(
+ session, netsliceInstanceId, operation, indata
+ )
+ self.format_on_new(
+ nsilcmop_desc, session["project_id"], make_public=session["public"]
+ )
_id = self.db.create("nsilcmops", nsilcmop_desc)
rollback.append({"topic": "nsilcmops", "_id": _id})
self.msg.write("nsi", operation, nsilcmop_desc)
raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)
def delete(self, session, _id, dry_run=False, not_send_msg=None):
    """Direct deletion of an nsi-lcm-op is not supported.

    Slice lifecycle-operation records are removed internally, never through
    this topic API; therefore this method always raises.
    :raises EngineException: always, with HTTP 500 INTERNAL_SERVER_ERROR.
    """
    detail = "Method delete called directly"
    raise EngineException(detail, HTTPStatus.INTERNAL_SERVER_ERROR)
def edit(self, session, _id, indata=None, kwargs=None, content=None):
    """Direct edition of an nsi-lcm-op is not supported.

    Slice operation records are immutable from the topic API; this method
    always raises.
    :raises EngineException: always, with HTTP 500 INTERNAL_SERVER_ERROR.
    """
    detail = "Method edit called directly"
    raise EngineException(detail, HTTPStatus.INTERNAL_SERVER_ERROR)